Commit 497b8e88 authored by tihmels's avatar tihmels

RingBuffer implementiert, Kommentare geschrieben, Emotionen vorerst auf drei eingeschränkt!

parent 46440f50
class RingBuffer:
    """Fixed-size FIFO buffer that keeps only the most recent `size` entries.

    Slots start out as None; every append evicts the oldest entry, so the
    buffer always holds exactly `size` elements (oldest first).
    """

    def __init__(self, size):
        # A deque with maxlen drops the oldest element on append in O(1),
        # unlike list.pop(0) which shifts the whole list (O(n)).
        from collections import deque
        self.data = deque([None] * size, maxlen=size)

    def append(self, x):
        """Add x as the newest entry, evicting the oldest one."""
        self.data.append(x)

    def get(self):
        """Return the buffer contents as a list, oldest entry first."""
        return list(self.data)
\ No newline at end of file
......@@ -3,7 +3,7 @@ This module contains face detections functions.
"""
import cv2
faceDet = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_default.xml')
faceDet_one = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_default.xml')
faceDet_two = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt2.xml')
faceDet_three = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt.xml')
faceDet_four = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt_tree.xml')
......@@ -12,40 +12,40 @@ faceDet_four = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalf
def find_faces(image):
    """Detect all faces in `image` and return them normalized.

    :param image: BGR input image (numpy array).
    :return: zip of (normalized 250x250 grayscale face, (x, y, w, h)) pairs,
        one per detected face.
    """
    faces_coordinates = locate_faces(image)
    cutted_faces = [image[y:y + h, x:x + w] for (x, y, w, h) in faces_coordinates]
    normalized_faces = [normalize_face(face) for face in cutted_faces]
    return zip(normalized_faces, faces_coordinates)
def normalize_face(face):
    """Convert a face crop to grayscale and resize it to 250x250 pixels.

    :param face: BGR face crop (numpy array).
    :return: 250x250 grayscale image.
    """
    face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
    face = cv2.resize(face, (250, 250))
    return face
def locate_faces(image, scaleFactor=1.4, minNeighbors=3):
    """Try each Haar cascade in turn until exactly one face is detected.

    :param image: input image for detection.
    :param scaleFactor: image-pyramid scale step for detectMultiScale.
    :param minNeighbors: minimum neighboring rectangles to accept a detection.
    :return: the detection array from the first cascade that finds exactly
        one face, or '' when no cascade does (callers compare against the
        empty-string sentinel).
    """
    # The four cascades differ only in their trained model; run them in
    # order of preference and stop at the first unambiguous hit.
    for cascade in (faceDet_one, faceDet_two, faceDet_three, faceDet_four):
        face = cascade.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(40, 40),
                                        flags=cv2.CASCADE_SCALE_IMAGE)
        if len(face) == 1:
            return face

    # NOTE(review): '' as a "nothing found" sentinel is fragile (callers use
    # `is ''`); kept for backward compatibility with existing callers.
    return ''
import glob
from shutil import copyfile

# Emotion labels; the order matters because the label files encode an
# emotion as its index in this list.
emotions = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"]
participants = glob.glob("basis_data/source_emotion/*")  # one folder per participant

for participant in participants:
    number = "%s" % participant[-4:]  # participant number (last 4 chars of the path)
    for session in glob.glob("%s/*" % participant):
        for label_path in glob.glob("%s/*" % session):
            # Session number sliced out of the path.
            # NOTE(review): the slice offsets (31 / 36 below) assume a fixed
            # basis_data/... path layout -- confirm before moving the script.
            current_session = label_path[31:-30]

            # The .txt label file holds the session's emotion encoded as a
            # float; `with` guarantees the handle is closed again.
            with open(label_path, 'r') as label_file:
                emotion = int(float(label_file.readline()))

            source_emotions = glob.glob("basis_data/source_images/%s/%s/*.png" % (number, current_session))
            source_emotions.sort()
            sourcefile_emotion = source_emotions[-1]  # last frame of a sequence shows the full emotion
            sourcefile_neutral = source_emotions[0]   # first frame is a neutral expression

            # Build the destination paths in the sorted set.
            dest_neut = "basis_data/sorted_set/neutral/%s" % sourcefile_neutral[36:]
            dest_emot = "basis_data/sorted_set/%s/%s" % (emotions[emotion], sourcefile_emotion[36:])

            copyfile(sourcefile_neutral, dest_neut)
            copyfile(sourcefile_emotion, dest_emot)
......@@ -8,19 +8,32 @@ import logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename='logs/process_model.log')
args = sys.argv
args = sys.argv # liest Input Parameter
logging.debug('Fisherface training initialized')
def get_files_from_emotion(emotion): # Define function to get file list, randomly shuffle it and split 80/20
files = glob.glob('basis_data/dataset/%s/*' % emotion)
file = open("gray_equalized.csv", "w")
def _get_faces_from_emotion(emotion):
"""
Holt alle Dateien zu einer Emotion aus dem Dataset, mischt sie und teilt sie in ein Trainings- und Prognoseset.
:param emotion: Die Emotion
:return: training, prediction
"""
files = glob.glob('basis_data/dataset/{}/*'.format(emotion))
random.shuffle(files)
training = files[:int(len(files) * 0.8)] # get first 80% of file list
prediction = files[-int(len(files) * 0.2):] # get last 20% of file list
return training, prediction
def image_preprocessing(image):
    """Load an image from disk and convert it to grayscale.

    :param image: path to the image file.
    :return: grayscale image as a numpy array.
    """
    img = cv2.imread(image)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return gray
def make_sets():
......@@ -29,7 +42,7 @@ def make_sets():
prediction_data = []
prediction_labels = []
for emotion in emotions:
training, prediction = get_files_from_emotion(emotion)
training, prediction = _get_faces_from_emotion(emotion)
# Append data to training and prediction list, and generate labels 0-7
for item in training:
img = image_preprocessing(item)
......@@ -66,16 +79,18 @@ if len(args) > 1:
tags = ', '.join(args[1:])
logging.debug(tags.upper())
emotions = ["anger", "disgust", "happy", "neutral", "surprise"] # Emotion list
emotions = ["happy", "neutral", "surprise"] # Emotion list
fishface = cv2.face.FisherFaceRecognizer_create()
# Now run it
metascore = []
for i in range(0, 10):
for i in range(0, 20):
correct = run_recognizer()
logging.debug("{} : {}%".format(i, int(correct)))
file.write("{}, {}".format(i, int(correct)))
metascore.append(correct)
file.close()
logging.debug("{} iterations - {}% average\n".format(len(metascore), np.mean(metascore)))
fishface.write('basis_data/models/detection_model.xml')
\ No newline at end of file
......@@ -8,7 +8,7 @@ logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)-12s %(level
filename='logs/sorted_set_facedetector.log',
filemode='w')
emotions = ["neutral", "anger", "disgust", "happy", "surprise"] # Define emotions
emotions = ["happy", "neutral", "surprise"] # Emotionen die verarbeitet werden sollen
totalFiles: int = 0
totalFaces: int = 0
......@@ -16,7 +16,7 @@ undetected: list = []
def detect_faces(emotion):
files = glob.glob('basis_data/sorted_set/%s/*' % emotion) # Get list of all images with emotion
files = glob.glob('basis_data/sorted_set/{}/*'.format(emotion)) # Holt alle Dateien zu einer Emotion aus dem sorted_set
global undetected
global totalFaces
......@@ -27,23 +27,24 @@ def detect_faces(emotion):
fileNumber = 0
for f in files:
frame = cv2.imread(f) # Open image
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert image to grayscale
facefeatures = locate_faces(frame, 1.05, 6)
facefeatures = locate_faces(gray, 1.05, 10)
if facefeatures is '':
undetected.append(f)
# Cut and save face
for (x, y, w, h) in facefeatures: # get coordinates and size of rectangle containing face
logging.debug("face found in file: {}".format(f))
totalFaces += 1
frame = frame[y:y + h, x:x + w] # Cut the frame to size
try:
out = cv2.resize(frame, (250, 250)) # Resize face so all images have same size
cv2.imwrite('basis_data/dataset/%s/%s.jpg' % (emotion, fileNumber), out) # Write image
except:
pass # If error, pass file
else:
# Cut and save face
for (x, y, w, h) in facefeatures: # get coordinates and size of rectangle containing face
logging.debug("face found in file: {}".format(f))
totalFaces += 1
gray = gray[y:y + h, x:x + w] # Cut the frame to size
try:
out = cv2.resize(gray, (250, 250)) # Resize face so all images have same size
cv2.imwrite('basis_data/dataset/{}/{}.jpg'.format(emotion, fileNumber), out) # Write image
except:
pass # If error, pass file
totalFiles += 1 # Increment image number
fileNumber += 1
......
......@@ -3,14 +3,13 @@ This module is the main module in this package. It loads emotion recognition mod
shows a webcam image, recognizes face and it's emotion and draw emotion on the image.
"""
from cv2 import WINDOW_NORMAL
import cv2
from RingBuffer import RingBuffer
from WebcamVideoStream import WebcamVideoStream
from face_detect import find_faces
from image_commons import nparray_as_image, draw_with_alpha
import numpy as np
def _load_emoticons(emotions):
    """Load the emoticon graphic for every emotion name.

    :param emotions: emotion names matching PNG files in resources/graphics.
    :return: list of images (read with alpha channel, flag -1) in the same order.
    """
    return [nparray_as_image(cv2.imread('resources/graphics/%s.png' % emotion, -1), mode=None) for emotion in emotions]
def show_webcam_and_run(model, emoticons, window_size=(800, 800), window_name='Mood Expression', update_time=1):
def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name='Mood Expression', update_time=1):
"""
Shows webcam image, detects faces and its emotions in real time and draw emoticons over those faces.
:param model: Learnt emotion detection model.
......@@ -31,21 +30,27 @@ def show_webcam_and_run(model, emoticons, window_size=(800, 800), window_name='M
:param window_name: Name of webcam image window.
:param update_time: Image update time interval.
"""
cv2.namedWindow(window_name, WINDOW_NORMAL)
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
if window_size:
width, height = window_size
cv2.resizeWindow(window_name, width, height)
vc = WebcamVideoStream(src=0).start()
vc = WebcamVideoStream().start()
frame = vc.read()
puffer = RingBuffer(7) # Der RingBuffer speichert die letzten Predictions
while True:
for normalized_face, (x, y, w, h) in find_faces(frame):
prediction = model.predict(normalized_face) # do prediction
image_to_draw = emoticons[(prediction[0])]
if x - 150 > 0 and y - 50 > 0 and w - 150 > 0 and h - 150 > 0:
draw_with_alpha(frame, image_to_draw, (x-150, y-50, w-150, h-150))
puffer.append(prediction[0]) # Speichere letzte Prediction
preds = puffer.get() # Hole Einträge als Array
if not (any(x is None for x in preds)): # Kein Eintrag im RingBuffer ist None
unique, counts = np.unique(preds, return_counts=True) # Vorkommen der Predictions zählen
image_to_draw = emoticons[unique[0]] # häufigster Wert wird dargestellt
draw_with_alpha(frame, image_to_draw, (40, 40, 200, 200))
cv2.imshow(window_name, frame)
frame = vc.read()
......@@ -59,7 +64,7 @@ def show_webcam_and_run(model, emoticons, window_size=(800, 800), window_name='M
if __name__ == '__main__':
emotions = ['neutral', 'anger', 'disgust', 'happy', 'surprise']
emotions = ['happy', 'neutral', 'surprise']
emoticons = _load_emoticons(emotions)
# load mode
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment