Commit 36ac2db8 authored by Arne Gerdes

Comments added

parent 7ab441c1
""" """
This module is the main module in this package. It loads emotion recognition model from a file, Dieses Modul ist das Main-Modul. Es lädt das Modell aus models, zeigt ein Webcam-Bild,
shows a webcam image, recognizes face and it's emotion and draw emotion on the image. erkennt das Gesicht und seine Emotionen und zeichnet ein Emoticon in das Bild.
""" """
import cv2
@@ -14,21 +14,21 @@ import numpy as np
def _load_emoticons(emotions):
""" """
Loads emotions images from graphics folder. Lädt die Emoticons aus dem graphics Ordner.
:param emotions: Array of emotions names. :param emotions: Array von Emotionen.
:return: Array of emotions graphics. :return: Array von Emotions Grafiken.
""" """
    return [nparray_as_image(cv2.imread('resources/graphics/%s.png' % emotion, -1), mode=None) for emotion in emotions]
def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name='Mood Expression', update_time=1):
""" """
Shows webcam image, detects faces and its emotions in real time and draw emoticons over those faces. Zeigt ein Webcam-Bild, erkennt Gesichter und Emotionen in Echtzeit und zeichnet Emoticons neben die Gesichter.
:param model: Learnt emotion detection model. :param model: Trainiertes Model
:param emoticons: List of emotions images. :param emoticons: Liste von Emoticons.
:param window_size: Size of webcam image window. :param window_size: Grösse des Webcam-Fensters.
:param window_name: Name of webcam image window. :param window_name: Name des Webcam-Fensters.
:param update_time: Image update time interval. :param update_time: Bildaktualisierungzeit.
""" """
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    if window_size:
@@ -37,19 +37,39 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name='M
    vc = WebcamVideoStream().start()
    frame = vc.read()
"""
puffer = RingBuffer(7) # Der RingBuffer speichert die letzten Predictions Der RingBuffer speichert die letzten 7 Predictions
"""
puffer = RingBuffer(7)
    while True:
        for normalized_face, (x, y, w, h) in find_faces(frame):
            prediction = model.predict(normalized_face)  # do prediction
            # Store the latest prediction
            puffer.append(prediction[0])
            # Fetch the buffered entries as an array
            preds = puffer.get()
            # Only draw once no entry in the ring buffer is None
            if not any(x is None for x in preds):
                # Count how often each prediction occurs in the buffer
                unique, counts = np.unique(preds, return_counts=True)
                # Display the most frequent prediction (majority vote over the buffer)
                image_to_draw = emoticons[unique[np.argmax(counts)]]
                draw_with_alpha(frame, image_to_draw, (40, 40, 200, 200))
        cv2.imshow(window_name, frame)
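The loop above smooths the per-frame predictions with RingBuffer, which is imported from elsewhere in the package and whose definition is not part of this diff. A minimal sketch of such a helper, assuming only the behaviour used here (fixed capacity, append/get, empty slots reported as None), could look like this:

class RingBuffer:
    """Hypothetical fixed-size buffer that keeps only the most recent `size` entries."""

    def __init__(self, size):
        self.size = size
        self.data = [None] * size  # slots start as None, so callers can tell whether the buffer is full yet
        self.index = 0

    def append(self, value):
        # Overwrite the oldest slot and advance the write position cyclically.
        self.data[self.index] = value
        self.index = (self.index + 1) % self.size

    def get(self):
        # Return all stored entries; their order does not matter for counting occurrences.
        return list(self.data)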
@@ -67,8 +87,9 @@ if __name__ == '__main__':
    emotions = ['happy', 'neutral', 'surprise']
    emoticons = _load_emoticons(emotions)
    # Load the trained model
    fisher_face = cv2.face.FisherFaceRecognizer_create()
    fisher_face.read('basis_data/models/detection_model.xml')

    # Run the webcam loop with the trained model
    show_webcam_and_run(fisher_face, emoticons)
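For reference, the smoothing step relies on np.unique with return_counts=True, which returns the distinct buffered predictions together with how often each occurs; taking np.argmax over the counts then yields the majority label. A small standalone example with made-up prediction indices:

import numpy as np

preds = [2, 0, 2, 2, 1, 2, 0]         # example indices into the emotions list
unique, counts = np.unique(preds, return_counts=True)
majority = unique[np.argmax(counts)]  # -> 2, the prediction that occurs most often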