Commit 497b8e88 authored by tihmels

RingBuffer implemented, comments written, emotions restricted to three for now!

parent 46440f50
class RingBuffer:
    """Fixed-size buffer that keeps only the most recent entries."""

    def __init__(self, size):
        self.data = [None for _ in range(size)]

    def append(self, x):
        # Drop the oldest entry, then append the newest at the end.
        self.data.pop(0)
        self.data.append(x)

    def get(self):
        return self.data
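A minimal usage sketch, assuming a buffer of three slots; until enough values have been appended, get() still returns the initial None placeholders:

buf = RingBuffer(3)
for value in (0, 1, 1, 2):
    buf.append(value)  # oldest value falls out once the buffer is full
print(buf.get())  # -> [1, 1, 2], the three most recent values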
@@ -3,7 +3,7 @@ This module contains face detection functions.
"""
import cv2

faceDet_one = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_default.xml')
faceDet_two = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt2.xml')
faceDet_three = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt.xml')
faceDet_four = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt_tree.xml')

@@ -12,40 +12,40 @@ faceDet_four = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalf
def find_faces(image):
    faces_coordinates = locate_faces(image)
    cutted_faces = [image[y:y + h, x:x + w] for (x, y, w, h) in faces_coordinates]
    normalized_faces = [normalize_face(face) for face in cutted_faces]
    return zip(normalized_faces, faces_coordinates)


def normalize_face(face):
    face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
    face = cv2.resize(face, (250, 250))
    return face
def locate_faces(image, scaleFactor=1.4, minNeighbors=3):
    # Try each Haar cascade in turn and return as soon as exactly one face is found.
    face = faceDet_one.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(40, 40),
                                        flags=cv2.CASCADE_SCALE_IMAGE)
    if len(face) == 1:
        return face

    face = faceDet_two.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(40, 40),
                                        flags=cv2.CASCADE_SCALE_IMAGE)
    if len(face) == 1:
        return face

    face = faceDet_three.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(40, 40),
                                          flags=cv2.CASCADE_SCALE_IMAGE)
    if len(face) == 1:
        return face

    face = faceDet_four.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(40, 40),
                                         flags=cv2.CASCADE_SCALE_IMAGE)
    if len(face) == 1:
        return face

    return []  # no unambiguous detection; an empty list keeps callers' len() checks working
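A minimal usage sketch of the detector chain, assuming a hypothetical test image at resources/test.jpg:

img = cv2.imread('resources/test.jpg')  # hypothetical test image
for face, (x, y, w, h) in find_faces(img):
    print('face of size {}x{} at ({}, {})'.format(w, h, x, y))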
import glob
from shutil import copyfile

emotions = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"]  # emotion labels - the order matters!
participants = glob.glob("basis_data/source_emotion/*")  # list of the data folders of all participants

for x in participants:
    number = "%s" % x[-4:]  # participant number
    for sessions in glob.glob("%s/*" % x):
        for files in glob.glob("%s/*" % sessions):
            current_session = files[31:-30]  # session number
            file = open(files, 'r')  # open the .txt file that belongs to the current emotion
            emotion = int(float(file.readline()))  # the file stores the current emotion, encoded as a float value
            file.close()

            source_emotions = glob.glob("basis_data/source_images/%s/%s/*.png" % (number, current_session))
            source_emotions.sort()
            sourcefile_emotion = source_emotions[-1]  # the last image of a sequence shows the fully developed emotion
            sourcefile_neutral = source_emotions[0]  # the first image is a neutral expression

            # build the destination paths for sorting
            dest_neut = "basis_data/sorted_set/neutral/%s" % sourcefile_neutral[36:]  # for the neutral expression
            dest_emot = "basis_data/sorted_set/%s/%s" % (emotions[emotion], sourcefile_emotion[36:])  # and for the emotion

            # copy the files
            copyfile(sourcefile_neutral, dest_neut)
            copyfile(sourcefile_emotion, dest_emot)
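The label files store the emotion as a float-encoded index into the emotions list; a minimal parsing sketch, assuming a hypothetical file content of '3.0000000e+00':

line = '3.0000000e+00'  # hypothetical content of an emotion .txt file
print(emotions[int(float(line))])  # -> 'disgust'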
@@ -8,19 +8,32 @@ import logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M',
                    filename='logs/process_model.log')

args = sys.argv  # read the input parameters

logging.debug('Fisherface training initialized')
file = open("gray_equalized.csv", "w")


def _get_faces_from_emotion(emotion):
    """
    Fetches all files for an emotion from the dataset, shuffles them, and splits them into a training and a prediction set.

    :param emotion: the emotion
    :return: training, prediction
    """
    files = glob.glob('basis_data/dataset/{}/*'.format(emotion))
    random.shuffle(files)
    training = files[:int(len(files) * 0.8)]  # get first 80% of file list
    prediction = files[-int(len(files) * 0.2):]  # get last 20% of file list
    return training, prediction
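A minimal sketch of the resulting 80/20 split, assuming a hypothetical list of ten files:

files = ['img{}.jpg'.format(i) for i in range(10)]  # hypothetical file list
training = files[:int(len(files) * 0.8)]     # first eight files
prediction = files[-int(len(files) * 0.2):]  # last two files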
def image_preprocessing(image):
    """
    Preprocesses a single file.

    :param image: path to the image
    :return: the image as a grayscale array
    """
    img = cv2.imread(image)  # open image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    return gray
def make_sets():
@@ -29,7 +42,7 @@ def make_sets():
    prediction_data = []
    prediction_labels = []

    for emotion in emotions:
        training, prediction = _get_faces_from_emotion(emotion)

        # Append data to training and prediction list, and generate a numeric label per emotion
        for item in training:
            img = image_preprocessing(item)
@@ -66,16 +79,18 @@ if len(args) > 1:
    tags = ', '.join(args[1:])
    logging.debug(tags.upper())

emotions = ["happy", "neutral", "surprise"]  # Emotion list

fishface = cv2.face.FisherFaceRecognizer_create()

# Now run it
metascore = []
for i in range(0, 20):
    correct = run_recognizer()
    logging.debug("{} : {}%".format(i, int(correct)))
    file.write("{}, {}\n".format(i, int(correct)))  # append this run's accuracy as a CSV row
    metascore.append(correct)
file.close()

logging.debug("{} iterations - {}% average\n".format(len(metascore), np.mean(metascore)))
fishface.write('basis_data/models/detection_model.xml')
@@ -8,7 +8,7 @@ logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)-12s %(level
                    filename='logs/sorted_set_facedetector.log',
                    filemode='w')

emotions = ["happy", "neutral", "surprise"]  # the emotions to be processed

totalFiles: int = 0
totalFaces: int = 0

@@ -16,7 +16,7 @@ undetected: list = []

def detect_faces(emotion):
    files = glob.glob('basis_data/sorted_set/{}/*'.format(emotion))  # fetch all files for this emotion from the sorted_set

    global undetected
    global totalFaces

@@ -27,23 +27,24 @@ def detect_faces(emotion):
    fileNumber = 0
    for f in files:
        frame = cv2.imread(f)  # Open image
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Convert image to grayscale

        facefeatures = locate_faces(gray, 1.05, 10)

        if len(facefeatures) == 0:
            undetected.append(f)
        else:
            # Cut and save face
            for (x, y, w, h) in facefeatures:  # get coordinates and size of rectangle containing face
                logging.debug("face found in file: {}".format(f))
                totalFaces += 1
                gray = gray[y:y + h, x:x + w]  # Cut the frame to size

                try:
                    out = cv2.resize(gray, (250, 250))  # Resize face so all images have same size
                    cv2.imwrite('basis_data/dataset/{}/{}.jpg'.format(emotion, fileNumber), out)  # Write image
                except cv2.error:
                    pass  # If resizing fails, skip the file

        totalFiles += 1  # Increment image number
        fileNumber += 1
...
@@ -3,14 +3,13 @@ This module is the main module in this package. It loads emotion recognition mod
shows a webcam image, recognizes faces and their emotions, and draws the emotion on the image.
"""
import cv2

from RingBuffer import RingBuffer
from WebcamVideoStream import WebcamVideoStream
from face_detect import find_faces
from image_commons import nparray_as_image, draw_with_alpha

import numpy as np
def _load_emoticons(emotions):
@@ -22,7 +21,7 @@ def _load_emoticons(emotions):
    return [nparray_as_image(cv2.imread('resources/graphics/%s.png' % emotion, -1), mode=None) for emotion in emotions]


def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name='Mood Expression', update_time=1):
    """
    Shows the webcam image, detects faces and their emotions in real time, and draws emoticons over those faces.

    :param model: Learnt emotion detection model.
@@ -31,21 +30,27 @@ def show_webcam_and_run(model, emoticons, window_size=(800, 800), window_name='M
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = WebcamVideoStream().start()
    frame = vc.read()

    puffer = RingBuffer(7)  # the ring buffer stores the most recent predictions

    while True:
        for normalized_face, (x, y, w, h) in find_faces(frame):
            prediction = model.predict(normalized_face)  # do prediction
            puffer.append(prediction[0])  # store the latest prediction

        preds = puffer.get()  # fetch the buffered entries as a list

        if not any(x is None for x in preds):  # no entry in the ring buffer is None
            unique, counts = np.unique(preds, return_counts=True)  # count the occurrences of each prediction
            image_to_draw = emoticons[unique[np.argmax(counts)]]  # the most frequent prediction is displayed
            draw_with_alpha(frame, image_to_draw, (40, 40, 200, 200))

        cv2.imshow(window_name, frame)
        frame = vc.read()
@@ -59,7 +64,7 @@ def show_webcam_and_run(model, emoticons, window_size=(800, 800), window_name='M
if __name__ == '__main__':
    emotions = ['happy', 'neutral', 'surprise']
    emoticons = _load_emoticons(emotions)

    # load model
...
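A minimal sketch of the majority vote over the buffered predictions, assuming hypothetical label indices in preds:

preds = [1, 1, 2, 1, 0, 1, 2]  # hypothetical ring buffer contents
unique, counts = np.unique(preds, return_counts=True)
print(unique[np.argmax(counts)])  # -> 1, the most frequent prediction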