import argparse
import os

import cv2
import cv2.face
import numpy as np

from RingBuffer import RingBuffer
from WebcamVideoStream import WebcamVideoStream
from face_detect import extract_faces
from image_commons import nparray_as_image, draw_with_alpha

parser = argparse.ArgumentParser(description='ProjectMood Emotion Detection')
parser.add_argument('-b', '--buffer', action='store', dest='buffer', default=12, type=int,
                    help='size of ringbuffer')
parser.add_argument('-m', '--model', action='store', dest='model',
                    default='resources/models/detection_model.xml', help='path to model')
arguments = parser.parse_args()


def _load_emoticons(emotions):
    """Load the emoticon images for the given emotions.

    :param emotions: iterable of emotion names; each must have a matching
        PNG at ``resources/emojis/<emotion>.png``.
    :return: list of images (read with alpha channel, flag -1), in the same
        order as *emotions*, so a prediction label indexes its emoticon.
    """
    return [nparray_as_image(cv2.imread('resources/emojis/{}.png'.format(emotion), -1), mode=None)
            for emotion in emotions]


def show_webcam_and_run(model, emoticons, window_size=(600, 600),
                        window_name=parser.description, update_time=1):
    """Show the webcam stream, recognize emotions in real time and draw the
    matching emoticon onto each frame.

    NOTE(review): this function also reads the module-level globals
    ``emotions`` (set in ``__main__``) and ``arguments`` — it must be called
    after they exist.

    :param model: trained cv2.face recognizer; ``predict()`` is expected to
        return a (label, confidence) pair.
    :param emoticons: list of emoticon images, indexed by prediction label.
    :param window_size: (width, height) of the display window.
    :param window_name: title of the display window.
    :param update_time: delay in ms passed to ``cv2.waitKey`` between frames.
    :raises IOError: if the sample dataset image cannot be read.
    """
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    width, height = window_size
    cv2.resizeWindow(window_name, width, height)

    vc = WebcamVideoStream().start()

    # A sample image from the dataset determines the size detected faces are
    # resized to (Fisherfaces requires a fixed input size).
    sample = cv2.imread('resources/img_data/dataset/{}/0.jpg'.format(emotions[0]))
    if sample is None:
        # Fail loudly instead of letting np.size(None, 0) raise a cryptic error.
        raise IOError('could not read sample image for emotion {!r}'.format(emotions[0]))
    resizefactor = np.size(sample, 0)

    # The ring buffer holds the last N predictions to smooth the displayed emotion.
    buffer = RingBuffer(arguments.buffer)

    frame = vc.read()
    while True:
        for normalized_face in extract_faces(frame, resizefactor):
            prediction = model.predict(normalized_face)  # (label, confidence)
            # Remember only the label; confidence is not used for smoothing.
            buffer.append(prediction[0])
            predictions = buffer.get()
            # Only draw once the buffer is completely filled (no None slots).
            if not any(x is None for x in predictions):
                unique, counts = np.unique(predictions, return_counts=True)
                # BUGFIX: display the MOST FREQUENT prediction. The original
                # used unique[0], which is merely the smallest label value and
                # ignored the computed counts.
                image_to_draw = emoticons[unique[np.argmax(counts)]]
                draw_with_alpha(frame, image_to_draw, (40, 40, 200, 200))

        cv2.imshow(window_name, frame)
        frame = vc.read()
        key = cv2.waitKey(update_time)
        if key == 27:  # exit on ESC
            vc.stop()
            break

    cv2.destroyWindow(window_name)


if __name__ == '__main__':
    # Every sub-folder of the dataset directory is treated as one emotion class;
    # the fallback (None, [], None) keeps this from crashing if the dir is missing.
    _, emotions, _ = next(os.walk('resources/img_data/dataset'), (None, [], None))
    emoticons = _load_emoticons(emotions)

    fisher_face = cv2.face.FisherFaceRecognizer_create()
    # Load the pre-trained model from disk.
    fisher_face.read(arguments.model)

    show_webcam_and_run(fisher_face, emoticons)