Commit 9ea9b986 authored by tihmels

.gitignore edited, and changes

parent 497b8e88
@@ -20,7 +20,10 @@ Icon
 .Trashes
 .VolumeIcon.icns
 .com.apple.timemachine.donotpresent
-basis_data/*
+basis_data/dataset/*
+basis_data/sorted_set/*
+basis_data/source_emotion/*
+basis_data/source_images/*
 # Directories potentially created on remote AFP share
 .AppleDB
...
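
The ignore rules now target the four basis_data subdirectories individually instead of the whole basis_data/ tree, so a fresh clone will not contain their contents. A minimal sketch that recreates that layout locally, assuming the sorting and training scripts expect the directories to exist (the directory names come from the rules above; everything else is illustrative):

import os

# Recreate the ignored basis_data layout (assumption: the scripts do not create it themselves).
IGNORED_DIRS = [
    "basis_data/dataset",
    "basis_data/sorted_set",
    "basis_data/source_emotion",
    "basis_data/source_images",
]

for d in IGNORED_DIRS:
    os.makedirs(d, exist_ok=True)  # no-op if the directory already exists
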
@@ -22,27 +22,28 @@ def normalize_face(face):
     return face
 
-def locate_faces(image, scaleFactor=1.4, minNeighbors=3):
-    face = faceDet_one.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(40, 40),
+def locate_faces(image, scaleFactor=1.4, minNeighbors=3, minSize=(20, 20)):
+    minx, miny = minSize
+    face = faceDet_one.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(minx, miny),
                                         flags=cv2.CASCADE_SCALE_IMAGE)
     if len(face) == 1:
         return face
 
-    face = faceDet_two.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(40, 40),
+    face = faceDet_two.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(minx, miny),
                                         flags=cv2.CASCADE_SCALE_IMAGE)
     if len(face) == 1:
         return face
 
-    face = faceDet_three.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(40, 40),
+    face = faceDet_three.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(minx, miny),
                                           flags=cv2.CASCADE_SCALE_IMAGE)
     if len(face) == 1:
         return face
 
-    face = faceDet_four.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(40, 40),
+    face = faceDet_four.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(minx, miny),
                                          flags=cv2.CASCADE_SCALE_IMAGE)
     if len(face) == 1:
...
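
The hard-coded minSize=(40, 40) is replaced by a minSize parameter that callers can tune per data set. A minimal sketch of the same fallback pattern, assuming the four detectors are standard OpenCV Haar cascades; the cascade file names below are illustrative, not taken from the commit:

import cv2

# Illustrative cascade files; the repository's faceDet_one..faceDet_four may differ.
CASCADE_FILES = [
    "haarcascade_frontalface_default.xml",
    "haarcascade_frontalface_alt.xml",
    "haarcascade_frontalface_alt2.xml",
    "haarcascade_frontalface_alt_tree.xml",
]
detectors = [cv2.CascadeClassifier(cv2.data.haarcascades + f) for f in CASCADE_FILES]

def locate_faces(image, scaleFactor=1.4, minNeighbors=3, minSize=(20, 20)):
    """Try each cascade in turn and return the first result with exactly one face."""
    for detector in detectors:
        face = detector.detectMultiScale(image, scaleFactor, minNeighbors,
                                         minSize=minSize, flags=cv2.CASCADE_SCALE_IMAGE)
        if len(face) == 1:
            return face
    return ''  # mirrors the empty-string check used by the sorting script
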
@@ -12,7 +12,7 @@ logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(m
 args = sys.argv  # reads the input parameters
 logging.debug('Fisherface training initialized')
-file = open("gray_equalized.csv", "w")
+file = open("gray.csv", "w")
 
 def _get_faces_from_emotion(emotion):
     """
@@ -34,7 +34,8 @@ def image_preprocessing(image):
     """
     img = cv2.imread(image)  # open image
     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
-    return gray
+    equal = cv2.equalizeHist(gray)
+    return equal
 
 def make_sets():
     training_data = []
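
image_preprocessing now adds histogram equalization on top of the grayscale conversion, which evens out lighting differences between source images before training. A minimal, self-contained sketch of the updated step; the example path is a placeholder:

import cv2

def image_preprocessing(image):
    img = cv2.imread(image)                       # open image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    equal = cv2.equalizeHist(gray)                # equalize the intensity histogram
    return equal

# Hypothetical usage:
# face = image_preprocessing("basis_data/sorted_set/happy/0001.png")
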
@@ -85,11 +86,13 @@ fishface = cv2.face.FisherFaceRecognizer_create()
 # Now run it
 metascore = []
-for i in range(0, 20):
+for i in range(0, 10):
     correct = run_recognizer()
+    file.write("{}\n".format(int(correct)))
     logging.debug("{} : {}%".format(i, int(correct)))
-    file.write("{}, {}".format(i, int(correct)))
     metascore.append(correct)
 
 file.close()
 logging.debug("{} iterations - {}% average\n".format(len(metascore), np.mean(metascore)))
...
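
The benchmark loop drops from 20 to 10 runs and now writes one accuracy value per line before the debug output. A minimal sketch of the same loop, assuming run_recognizer() returns the accuracy of one random train/test split in percent; the with-statement stands in for the explicit file.close() but is otherwise equivalent:

import logging
import numpy as np

logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s')

def run_recognizer():
    # Placeholder for the real Fisherface train/predict cycle.
    return 75.0

metascore = []
with open("gray.csv", "w") as results:
    for i in range(0, 10):
        correct = run_recognizer()
        results.write("{}\n".format(int(correct)))         # one accuracy value per line
        logging.debug("{} : {}%".format(i, int(correct)))
        metascore.append(correct)

logging.debug("{} iterations - {}% average".format(len(metascore), np.mean(metascore)))
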
@@ -14,7 +14,6 @@ totalFiles: int = 0
 totalFaces: int = 0
 undetected: list = []
-
 def detect_faces(emotion):
     files = glob.glob('basis_data/sorted_set/{}/*'.format(emotion))  # fetch all files for one emotion from the sorted_set
@@ -29,7 +28,7 @@ def detect_faces(emotion):
         frame = cv2.imread(f)  # Open image
         gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Convert image to grayscale
-        facefeatures = locate_faces(gray, 1.05, 10)
+        facefeatures = locate_faces(gray, 1.1, 10, (40, 40))
         if facefeatures is '':
             undetected.append(f)
...
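
The sorting script now passes the new minSize argument explicitly (scale factor 1.1, 10 neighbors, a 40x40 pixel minimum). Combined with the locate_faces sketch above, the per-emotion pass looks roughly like this; the length-based emptiness check is an assumption standing in for the repository's `facefeatures is ''` comparison:

import glob
import cv2

undetected = []

def detect_faces(emotion):
    # Fetch all files for one emotion from the sorted_set.
    files = glob.glob('basis_data/sorted_set/{}/*'.format(emotion))
    for f in files:
        frame = cv2.imread(f)                           # open image
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert to grayscale
        facefeatures = locate_faces(gray, 1.1, 10, (40, 40))
        if len(facefeatures) == 0:                      # nothing usable detected
            undetected.append(f)
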