Commit 099aee22 authored by Arne Gerdes's avatar Arne Gerdes

webcam.py und WebcamVideoStream.py die Kommentare geändert

parent d4b78a8b
...@@ -5,48 +5,38 @@ import cv2 ...@@ -5,48 +5,38 @@ import cv2
class WebcamVideoStream:
    """Threaded webcam reader that always exposes the most recently read frame."""

    def __init__(self, src=0):
        """Open the capture device *src* and grab an initial frame."""
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        # Flag used to ask the reader thread to terminate.
        self.stopped = False

    def start(self):
        """Launch the background thread that keeps reading frames.

        :return: self, so callers can chain ``WebcamVideoStream().start()``.
        """
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        """Read frames in a loop until stop() is requested."""
        while not self.stopped:
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        """Return the frame most recently read from the stream."""
        return self.frame

    def stop(self):
        """Request the reader thread to terminate."""
        self.stopped = True
import numpy as np import os
import random
import cv2 import cv2
import mxnet as mx import numpy as np
import pandas as pd import pandas as pd
import random
import os
curdir = os.path.abspath(os.path.dirname(__file__)) curdir = os.path.abspath(os.path.dirname(__file__))
def gen_record(csvfile, channel):
    """Split a fer2013-style CSV into per-Usage/per-label directories of images.

    Reads the columns ``emotion``, ``pixels`` and ``Usage``, reshapes each
    space-separated pixel row into a square grayscale image, and writes it to
    ``curdir/<Usage>/<emotion>/<unique name>.jpg``.

    :param csvfile: path to the CSV file.
    :param channel: unused; kept for backward compatibility with callers.
    """
    data = pd.read_csv(csvfile, delimiter=',', dtype='a')
    # np.float was removed in NumPy 1.24; the builtin float is the replacement.
    labels = np.array(data['emotion'], float)
    imagebuffer = np.array(data['pixels'])
    # np.fromstring(..., sep=' ') is deprecated; parse the space-separated
    # pixel text explicitly instead.
    images = np.array([np.array(image.split(), np.uint8) for image in imagebuffer])
    del imagebuffer  # free the raw pixel strings early
    # Rows are flattened square images, so the side length is sqrt(pixel count).
    num_shape = int(np.sqrt(images.shape[-1]))
    images.shape = (images.shape[0], num_shape, num_shape)
    dirs = set(data['Usage'])
    class_dir = {}
    for dr in dirs:
        dest = os.path.join(curdir, dr)
        class_dir[dr] = dest
        # exist_ok avoids the race between an exists() check and mkdir().
        os.makedirs(dest, exist_ok=True)
    records = zip(labels, images, data['Usage'])
    for label, img, usage in records:
        destdir = os.path.join(class_dir[usage], str(int(label)))
        os.makedirs(destdir, exist_ok=True)
        filepath = unique_name(destdir, usage)
        # unique_name may return a falsy value; check BEFORE printing/using it
        # (the original printed the path first and checked afterwards).
        if not filepath:
            continue
        print('[^_^] Write image to %s' % filepath)
        sig = cv2.imwrite(filepath, img)
        if not sig:
            print('Error')
            exit(-1)
def unique_name(pardir, prefix, suffix='jpg'):
    """Return a path inside *pardir* that does not exist yet.

    The name has the form ``<prefix>_<random int>.<suffix>``. Bug fix: the
    original recursed on a name collision but dropped the recursive result
    (missing ``return``), so a collision yielded None; loop until a free
    name is found instead.

    :param pardir: directory the file should live in.
    :param prefix: file-name prefix.
    :param suffix: file extension without the dot, defaults to 'jpg'.
    :return: a file path that did not exist at the time of the check.
    """
    while True:
        filename = '{0}_{1}.{2}'.format(prefix, random.randint(1, 10 ** 8), suffix)
        filepath = os.path.join(pardir, filename)
        if not os.path.exists(filepath):
            return filepath
if __name__ == '__main__':
    # Resolve the dataset CSV relative to this script's directory, then
    # convert it into per-class image folders.
    csv_path = os.path.join(curdir, 'fer2013.csv')
    gen_record(csv_path, 1)
\ No newline at end of file
import cv2

# Three pretrained frontal-face Haar cascades from the project's resources
# folder. NOTE(review): presumably the extra classifiers serve as fallbacks
# when the default one finds no face — confirm in the detection code, which
# is not visible in this chunk.
faceDet_one = cv2.CascadeClassifier('resources/haarcascade/haarcascade_frontalface_default.xml')
faceDet_two = cv2.CascadeClassifier('resources/haarcascade/haarcascade_frontalface_alt2.xml')
faceDet_three = cv2.CascadeClassifier('resources/haarcascade/haarcascade_frontalface_alt.xml')
......
"""
Diese Klasse ist das Main-Modul. Es lädt das Modell aus models, zeigt ein Webcam-Bild,
erkennt das Gesicht und seine Emotionen und zeichnet ein Emoticon in das Bild.
"""
import argparse import argparse
import os
import cv2 import cv2
import cv2.face import cv2.face
...@@ -11,7 +8,6 @@ from RingBuffer import RingBuffer ...@@ -11,7 +8,6 @@ from RingBuffer import RingBuffer
from WebcamVideoStream import WebcamVideoStream from WebcamVideoStream import WebcamVideoStream
from face_detect import extract_faces from face_detect import extract_faces
from image_commons import nparray_as_image, draw_with_alpha from image_commons import nparray_as_image, draw_with_alpha
import os
parser = argparse.ArgumentParser(description='ProjectMood Emotion Detection') parser = argparse.ArgumentParser(description='ProjectMood Emotion Detection')
parser.add_argument('-b', '--buffer', action='store', dest='buffer', default=12, type=int, help='size of ringbuffer') parser.add_argument('-b', '--buffer', action='store', dest='buffer', default=12, type=int, help='size of ringbuffer')
...@@ -19,23 +15,25 @@ parser.add_argument('-m', '--model', action='store', dest='model', default='reso ...@@ -19,23 +15,25 @@ parser.add_argument('-m', '--model', action='store', dest='model', default='reso
help='path to model') help='path to model')
arguments = parser.parse_args() arguments = parser.parse_args()
def _load_emoticons(emotions):
    """
    Load the emoticon images from the resources/emojis folder.

    :param emotions: emotion names; each maps to ``resources/emojis/<name>.png``.
    :return: list of emoticon images, in the same order as the input.
    """
    images = []
    for emotion in emotions:
        path = 'resources/emojis/{}.png'.format(emotion)
        # -1 keeps the alpha channel so the emoji can be drawn transparently.
        images.append(nparray_as_image(cv2.imread(path, -1), mode=None))
    return images
def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=parser.description, update_time=1): def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=parser.description, update_time=1):
""" """
Zeigt ein Webcam-Bild, erkennt Gesichter und Emotionen in Echtzeit und zeichnet Emoticons neben die Gesichter. Shows a webcam image, recognizes faces and emotions in real time and draws emoticons next to the faces.
:param model: Trainiertes Model : param model: Trained Model
:param emoticons: Liste von Emoticons. : param emoticons: list of emoticons.
:param window_size: Grösse des Webcam-Fensters. : param window_size: Size of the webcam window.
:param window_name: Name des Webcam-Fensters. : param window_name: name of the webcam window.
:param update_time: Bildaktualisierungzeit. : param update_time: Image update time.
""" """
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL) cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
width, height = window_size width, height = window_size
...@@ -43,13 +41,11 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=pa ...@@ -43,13 +41,11 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=pa
vc = WebcamVideoStream().start() vc = WebcamVideoStream().start()
# ein zufälliges Bild aus dem Dataset um das Bildformat zu bestimmen (für Fisherface wichtig) # a random image from the dataset to determine the image format (important for Fisherface)
random = cv2.imread('resources/img_data/dataset/{}/0.jpg'.format(emotions[0])) random = cv2.imread('resources/img_data/dataset/{}/0.jpg'.format(emotions[0]))
resizefactor = np.size(random, 0) resizefactor = np.size(random, 0)
""" # The RingBuffer stores the last x Predictions
Der RingBuffer speichert die letzten x Predictions
"""
buffer = RingBuffer(arguments.buffer) buffer = RingBuffer(arguments.buffer)
frame = vc.read() frame = vc.read()
...@@ -58,28 +54,18 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=pa ...@@ -58,28 +54,18 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=pa
for normalized_face in extract_faces(frame, resizefactor): for normalized_face in extract_faces(frame, resizefactor):
prediction = model.predict(normalized_face) # do prediction prediction = model.predict(normalized_face) # do prediction
""" # Save the Predictions
Seichert die Predictions
"""
buffer.append(prediction[0]) buffer.append(prediction[0])
""" # Get the entries as an array
Holt die Einträge als Array
"""
predictions = buffer.get() predictions = buffer.get()
""" # No entry in the ring buffer is None
Kein Eintrag im RingBuffer ist None
"""
if not (any(x is None for x in predictions)): if not (any(x is None for x in predictions)):
""" # Counting occurrences of predictions
Vorkommen der Predictions zählen
"""
unique, counts = np.unique(predictions, return_counts=True) unique, counts = np.unique(predictions, return_counts=True)
""" # Most frequent value is displayed
Häufigster Wert wird dargestellt
"""
image_to_draw = emoticons[unique[0]] image_to_draw = emoticons[unique[0]]
draw_with_alpha(frame, image_to_draw, (40, 40, 200, 200)) draw_with_alpha(frame, image_to_draw, (40, 40, 200, 200))
...@@ -87,7 +73,8 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=pa ...@@ -87,7 +73,8 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=pa
frame = vc.read() frame = vc.read()
key = cv2.waitKey(update_time) key = cv2.waitKey(update_time)
if key == 27: # exit on ESC # exit on ESC
if key == 27:
vc.stop() vc.stop()
break break
...@@ -95,13 +82,13 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=pa ...@@ -95,13 +82,13 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=pa
if __name__ == '__main__':
    # The emotions in the Dataset folder should also be loaded in the
    # application: each sub-directory name of the dataset is one emotion label.
    _, emotions, _ = next(os.walk('resources/img_data/dataset'), (None, [], None))
    emoticons = _load_emoticons(emotions)
    fisher_face = cv2.face.FisherFaceRecognizer_create()
    # Load the trained model from the path given by the -m/--model argument.
    fisher_face.read(arguments.model)
    show_webcam_and_run(fisher_face, emoticons)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.