Commit 099aee22 authored by Arne Gerdes

Changed the comments in webcam.py and WebcamVideoStream.py

parent d4b78a8b
@@ -5,48 +5,38 @@ import cv2
class WebcamVideoStream:
def __init__(self, src=0):
"""
Initialize the webcam stream and read the first frame from the stream
"""
# Initialize webcam stream and read the first frame from the stream
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
"""
Initialize the variable that indicates whether the thread should be stopped
"""
# Variable indicating whether the thread should be stopped
self.stopped = False
def start(self):
"""
Start the thread to read frames from the webcam stream
"""
# start the thread to read frames from the webcam stream
Thread(target=self.update, args=()).start()
return self
def update(self):
"""
Endless loop until the thread is stopped
"""
# Endless loop until the thread stops
while True:
"""
If the thread indicator variable is set, stop the thread
"""
# If the thread indicator variable is set, it stops the thread
if self.stopped:
return
"""
Otherwise, read the next frame from the stream
"""
# Otherwise, the next frame is read from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
"""
Return the most recently read frame
"""
# returns the last frame
return self.frame
def stop(self):
"""
Indicate that the thread should be stopped
"""
# indicates that the thread should be stopped
self.stopped = True
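For reference, a minimal usage sketch of the threaded reader above; only the start/read/stop calls come from the class itself, the preview window and ESC handling are illustrative.
from WebcamVideoStream import WebcamVideoStream
import cv2

vc = WebcamVideoStream(src=0).start()   # spawn the background thread that keeps grabbing frames
try:
    while True:
        frame = vc.read()               # non-blocking: returns the most recently grabbed frame
        cv2.imshow('preview', frame)
        if cv2.waitKey(1) == 27:        # ESC
            break
finally:
    vc.stop()                           # tells update() to return and end the thread
    cv2.destroyAllWindows()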
import numpy as np
import os
import random
import cv2
import mxnet as mx
import numpy as np
import pandas as pd
import random
import os
curdir = os.path.abspath(os.path.dirname(__file__))
def gen_record(csvfile,channel):
data = pd.read_csv(csvfile,delimiter=',',dtype='a')
labels = np.array(data['emotion'],np.float)
def gen_record(csvfile, channel):
data = pd.read_csv(csvfile, delimiter=',', dtype='a')
labels = np.array(data['emotion'], np.float)
# print(labels,'\n',data['emotion'])
imagebuffer = np.array(data['pixels'])
images = np.array([np.fromstring(image,np.uint8,sep=' ') for image in imagebuffer])
images = np.array([np.fromstring(image, np.uint8, sep=' ') for image in imagebuffer])
del imagebuffer
num_shape = int(np.sqrt(images.shape[-1]))
images.shape = (images.shape[0],num_shape,num_shape)
images.shape = (images.shape[0], num_shape, num_shape)
# img=images[0];cv2.imshow('test',img);cv2.waitKey(0);cv2.destroyAllWindow();exit()
dirs = set(data['Usage'])
subdirs = set(labels)
class_dir = {}
for dr in dirs:
dest = os.path.join(curdir,dr)
dest = os.path.join(curdir, dr)
class_dir[dr] = dest
if not os.path.exists(dest):
os.mkdir(dest)
data = zip(labels,images,data['Usage'])
data = zip(labels, images, data['Usage'])
for d in data:
destdir = os.path.join(class_dir[d[-1]],str(int(d[0])))
destdir = os.path.join(class_dir[d[-1]], str(int(d[0])))
if not os.path.exists(destdir):
os.mkdir(destdir)
img = d[1]
filepath = unique_name(destdir,d[-1])
filepath = unique_name(destdir, d[-1])
print('[^_^] Write image to %s' % filepath)
if not filepath:
continue
sig = cv2.imwrite(filepath,img)
sig = cv2.imwrite(filepath, img)
if not sig:
print('Error')
exit(-1)
def unique_name(pardir,prefix,suffix='jpg'):
filename = '{0}_{1}.{2}'.format(prefix,random.randint(1,10**8),suffix)
filepath = os.path.join(pardir,filename)
def unique_name(pardir, prefix, suffix='jpg'):
filename = '{0}_{1}.{2}'.format(prefix, random.randint(1, 10 ** 8), suffix)
filepath = os.path.join(pardir, filename)
if not os.path.exists(filepath):
return filepath
unique_name(pardir,prefix,suffix)
return unique_name(pardir, prefix, suffix)
if __name__ == '__main__':
filename = 'fer2013.csv'
filename = os.path.join(curdir,filename)
gen_record(filename,1)
filename = os.path.join(curdir, filename)
gen_record(filename, 1)
# ##################### test
# tmp = unique_name('./Training','Training')
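The conversion above assumes the standard fer2013 CSV layout; the sample rows below are illustrative, only the column names and the space-separated pixel format are taken from the code itself.
# emotion,pixels,Usage
# 0,70 80 82 72 ... 106,Training
# 3,151 150 147 155 ... 193,PublicTest
#
# gen_record() reshapes each space-separated pixel string into a square grayscale
# image and writes it to <Usage>/<emotion label>/<Usage>_<random number>.jpg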
......
import cv2
faceDet_one = cv2.CascadeClassifier('resources/haarcascade/haarcascade_frontalface_default.xml')
faceDet_two = cv2.CascadeClassifier('resources/haarcascade/haarcascade_frontalface_alt2.xml')
faceDet_three = cv2.CascadeClassifier('resources/haarcascade/haarcascade_frontalface_alt.xml')
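The actual combination logic lives in face_detect.extract_faces, which is not part of this diff; the helper below is only an illustrative sketch (its name and parameters are assumptions) of how the three cascades could be tried in sequence.
def detect_face(gray):
    # try each cascade in turn and fall back to the next one only if nothing was found
    for cascade in (faceDet_one, faceDet_two, faceDet_three):
        faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
        if len(faces):
            return faces
    return []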
......
"""
This is the main module. It loads the model from models, shows a webcam image,
detects the face and its emotions, and draws an emoticon into the image.
"""
import argparse
import os
import cv2
import cv2.face
@@ -11,7 +8,6 @@ from RingBuffer import RingBuffer
from WebcamVideoStream import WebcamVideoStream
from face_detect import extract_faces
from image_commons import nparray_as_image, draw_with_alpha
import os
parser = argparse.ArgumentParser(description='ProjectMood Emotion Detection')
parser.add_argument('-b', '--buffer', action='store', dest='buffer', default=12, type=int, help='size of ringbuffer')
@@ -19,23 +15,25 @@ parser.add_argument('-m', '--model', action='store', dest='model', default='reso
help='path to model')
arguments = parser.parse_args()
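A usage note: one way to invoke the script from the repository root (the model path is a placeholder; omitting -m falls back to the default configured above):
python webcam.py --buffer 20 --model <path/to/trained_fisherface_model.xml>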
def _load_emoticons(emotions):
"""
Load the emoticons from the emojis folder.
:param emotions: Emotions as an array.
:return: Array of emotion graphics.
"""
return [nparray_as_image(cv2.imread('resources/emojis/{}.png'.format(emotion), -1), mode=None) for emotion in emotions]
return [nparray_as_image(cv2.imread('resources/emojis/{}.png'.format(emotion), -1), mode=None) for emotion in
emotions]
def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=parser.description, update_time=1):
"""
Shows a webcam image, recognizes faces and emotions in real time, and draws emoticons next to the faces.
:param model: Trained model.
:param emoticons: List of emoticons.
:param window_size: Size of the webcam window.
:param window_name: Name of the webcam window.
:param update_time: Image update time.
"""
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
width, height = window_size
@@ -43,13 +41,11 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=pa
vc = WebcamVideoStream().start()
# a random image from the dataset to determine the image format (important for Fisherface)
random = cv2.imread('resources/img_data/dataset/{}/0.jpg'.format(emotions[0]))
resizefactor = np.size(random, 0)
"""
The RingBuffer stores the last x predictions
"""
# The RingBuffer stores the last x Predictions
buffer = RingBuffer(arguments.buffer)
frame = vc.read()
@@ -58,28 +54,18 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=pa
for normalized_face in extract_faces(frame, resizefactor):
prediction = model.predict(normalized_face) # do prediction
"""
Save the predictions
"""
# Save the Predictions
buffer.append(prediction[0])
"""
Get the entries as an array
"""
# Get the entries as an array
predictions = buffer.get()
"""
No entry in the ring buffer is None
"""
# No entry in the ring buffer is None
if not (any(x is None for x in predictions)):
"""
Count the occurrences of the predictions
"""
# Counting occurrences of predictions
unique, counts = np.unique(predictions, return_counts=True)
"""
The most frequent value is displayed
"""
# Most frequent value is displayed
image_to_draw = emoticons[unique[np.argmax(counts)]]
draw_with_alpha(frame, image_to_draw, (40, 40, 200, 200))
@@ -87,7 +73,8 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=pa
frame = vc.read()
key = cv2.waitKey(update_time)
if key == 27: # exit on ESC
# exit on ESC
if key == 27:
vc.stop()
break
@@ -95,13 +82,13 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=pa
if __name__ == '__main__':
# The emotions in the dataset folder should also be loaded in the application
_, emotions, _ = next(os.walk('resources/img_data/dataset'), (None, [], None))
emoticons = _load_emoticons(emotions)
fisher_face = cv2.face.FisherFaceRecognizer_create()
# Load the trained model
fisher_face.read(arguments.model)
show_webcam_and_run(fisher_face, emoticons)
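The RingBuffer module itself is not shown in this commit; a minimal sketch that is consistent with the calls used above (RingBuffer(size), append(), get()) could look like this:
class RingBuffer:
    def __init__(self, size):
        self.data = [None] * size      # fixed capacity, pre-filled with None until it warms up

    def append(self, x):
        self.data.pop(0)               # drop the oldest prediction
        self.data.append(x)            # keep the newest one at the end

    def get(self):
        return self.data               # entries oldest-to-newest; may still contain None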