Commit 23de87a0 authored by tihmels's avatar tihmels
Browse files

Repo aufgeräumt, alles mindestens einmal verschoben, aus Prinzip -

Email Service eingebaut, um bei der Massenverarbeitung Logdateien schicken zu können.
Und ArgumentParser, bisher nur bei sorted_set_facedetector implementiert, um Programm mit Parametern über Konsole zu starten. Biaaaatchesssss <3
parent 05fba73b
import cv2
import glob
import logging
import argparse
import shutil
import sys
import os
from face_detect import locate_faces
# Root logger configuration: debug level, timestamped "name level message" format.
# NOTE(review): a diff hunk marker replaced one argument line here — the hidden
# line most likely carried filename=..., since filemode='w' only has an effect
# together with filename=. Confirm against the full file.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                    filemode='w')
"""
Emotionen die verarbeitet werden sollen
Argument Parser erlaubt Parameter für die Verarbeitung anzugeben.
"""
emotions = ["happy", "neutral", "surprise"]
parser = argparse.ArgumentParser(description='Sorted Set Face Creator Application')
parser.add_argument('-s', action='store', dest='img_source', default='resources/img_data/sorted_set/',
help='Pfad zu den Bilddateien')
parser.add_argument('-b', action='store', dest='dataset', default='resources/img_data/dataset/', help='Pfad zum Dataset')
parser.add_argument('-e', action='append', dest='emotions', default=['happy', 'neutral', 'surprised'],
help='Emotionen die verarbeitet werden sollen')
parser.add_argument('-r', action='store', dest='resize', default=150, type=int, help='Resize Factor')
parser.add_argument('-f', action='store', dest='scaleFactor', default=1.1, type=float,
help='Skalierungsfaktor für Haarcascade')
parser.add_argument('-n', action='store', dest='minNeighbors', default=6, type=int, help='MinNeighbors für Haarcascade')
parser.add_argument('-m', action='store', dest='minSize', default=40, type=int, help='MinSize für Haarcascade')
# Parse the CLI arguments declared above; log the Namespace so batch runs
# are traceable in the log file.
arguments = parser.parse_args()
logging.debug(arguments)

datasetPath = arguments.dataset

# Safety prompt: this run wipes the existing dataset folder. Only an explicit
# lowercase 'y' continues; any other answer aborts the program.
if len(glob.glob(datasetPath + '*')) > 0:
    deleteDataset = input(
        'Im Dataset befinden sich Dateien. Durch diesen Prozess werden die existierenden Daten gelöscht. Fortfahren (y/n): ')
    if deleteDataset == 'y':
        for file in glob.glob(datasetPath + '*'):
            # NOTE(review): shutil.rmtree() removes directories only; a plain
            # file at the dataset root would raise — confirm the dataset root
            # only ever contains the per-emotion subfolders.
            shutil.rmtree(file)
    else:
        sys.exit()

# Module-level counters/collections mutated by detect_faces() via `global`.
totalFiles: int = 0    # number of images processed
totalFaces: int = 0    # number of faces detected and written
undetected: list = []  # source files in which no face was found

img_source = arguments.img_source
def detect_faces(emotion):
    """
    Process all source images of one emotion: detect the face in each image,
    crop it, resize it and write it into the dataset folder.

    Updates the module-level counters totalFiles/totalFaces and the
    undetected list via ``global``.

    :param emotion: name of the emotion subfolder to process.
    """
    files = glob.glob(img_source + '{}/*'.format(emotion))

    global undetected
    global totalFaces
    global totalFiles

    logging.debug("found {} {} files".format(len(files), emotion))

    fileNumber = 0
    for f in files:
        frame = cv2.imread(f)  # open image
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert image to grayscale
        facefeatures = locate_faces(gray, arguments.scaleFactor, arguments.minNeighbors,
                                    (arguments.minSize, arguments.minSize))
        # The original tested `facefeatures is ''`, an identity comparison
        # against a string literal that is never a reliable emptiness check.
        # An empty result (whether '' or an empty sequence) has length 0.
        if len(facefeatures) == 0:
            undetected.append(f)
        else:
            # cut out and save every detected face
            for (x, y, w, h) in facefeatures:  # coordinates and size of the face rectangle
                logging.debug("face found in file: {}".format(f))
                totalFaces += 1
                gray = gray[y:y + h, x:x + w]  # cut the frame to the face rectangle
                try:
                    out = cv2.resize(gray, (arguments.resize, arguments.resize))  # same size for all images
                    success = cv2.imwrite(datasetPath + '{}/{}.jpg'.format(emotion, fileNumber), out)
                    if not success:
                        logging.error('A problem while writing file occurred')
                        sys.exit()
                except Exception:
                    # Was a bare `except:`, which also swallowed the SystemExit
                    # raised by sys.exit() above; `Exception` lets it propagate.
                    pass  # if OpenCV cannot resize/write this file, skip it
        totalFiles += 1  # increment image number
        fileNumber += 1
    logging.debug("\n")
# Process every requested emotion: ensure its output folder exists, then
# run the detection pass. (The diff residue contained a second, older loop
# over the hard-coded `emotions` list; the argument-driven loop is kept.)
for emotion in arguments.emotions:
    if not os.path.exists(datasetPath + emotion):
        os.makedirs(datasetPath + emotion)
    detect_faces(emotion)

# Summary: how many faces were found, and which files yielded none.
logging.debug('{} faces in {} files found'.format(totalFaces, totalFiles))
logging.debug('in {} files no face could be detected'.format(totalFiles - totalFaces))
for f in undetected:
    logging.debug(f)
# ---- second file in this commit diff (webcam emotion viewer); hunk marker was: @@ -7,21 +7,21 @@ import cv2 ----
from RingBuffer import RingBuffer
from WebcamVideoStream import WebcamVideoStream
from face_detect import find_faces
from face_detect import extract_faces
from image_commons import nparray_as_image, draw_with_alpha
import numpy as np
def _load_emoticons(emotions):
    """
    Load the emoticon graphics from the emojis folder.

    :param emotions: list of emotion names; each must have a matching
        ``resources/emojis/<name>.png`` file.
    :return: list of emoticon images, one per emotion.
    """
    # cv2.imread with flag -1 keeps the alpha channel of the PNGs.
    return [nparray_as_image(cv2.imread('resources/emojis/%s.png' % emotion, -1), mode=None) for emotion in emotions]
# NOTE(review): this region is diff residue — several lines below appear twice,
# once in their old and once in their new form, and the end of the function is
# hidden behind the next hunk marker. The comments mark the old/new pairs; only
# the "new" line of each pair belongs in the final file.

# old signature (window_name='Mood Expression') …
def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name='Mood Expression', update_time=1):
# … replaced by the new signature (window_name='Project Mood'):
def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name='Project Mood', update_time=1):
"""
Shows the webcam image, detects faces and emotions in real time and draws
emoticons next to the faces.

:param model: trained model
:param update_time: image refresh time.
(remaining parameter docs were hidden behind the diff hunk marker)
"""
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
# old: unpack/resize guarded by `if window_size:` …
if window_size:
width, height = window_size
cv2.resizeWindow(window_name, width, height)
# … new: unconditional unpack/resize:
width, height = window_size
cv2.resizeWindow(window_name, width, height)
vc = WebcamVideoStream().start()
frame = vc.read()
# The ring buffer stores the last 7 predictions.
"""
Der RingBuffer speichert die letzten 7 Predictions
"""
# old name `puffer` … replaced by new name `buffer`:
puffer = RingBuffer(7)
buffer = RingBuffer(7)
# old loop head: iterate face rectangles from find_faces(frame) …
while True:
for normalized_face, (x, y, w, h) in find_faces(frame):
# … new loop: re-read the frame each iteration, use extract_faces():
frame = vc.read()
while True:
for normalized_face in extract_faces(frame):
prediction = model.predict(normalized_face)  # do prediction
# Stores the prediction ("Seichert" is a typo for "Speichert").
"""
Seichert die Predictions
"""
# old: puffer.append … new: buffer.append:
puffer.append(prediction[0])
buffer.append(prediction[0])
"""
Holt die Einträge als Array
Holt die Einträge als Array
"""
# Fetch the buffered entries as an array (old name preds, new predictions):
preds = puffer.get()
predictions = buffer.get()
# Only act once the ring buffer is full (no entry is None).
"""
Kein Eintrag im RingBuffer ist None
"""
if not (any(x is None for x in preds)):
if not (any(x is None for x in predictions)):
# Count the occurrences of each prediction.
"""
Vorkommen der Predictions zählen
"""
unique, counts = np.unique(preds, return_counts=True)
unique, counts = np.unique(predictions, return_counts=True)
# The most frequent value is displayed — rest of the body is hidden
# behind the next diff hunk marker.
"""
Häufigster Wert wird dargestellt
# ---- hunk marker was: @@ -89,7 +89,6 @@ if __name__ == '__main__':  (the lines below are the body of that main guard, whose header is hidden) ----
fisher_face = cv2.face.FisherFaceRecognizer_create()
# Load the trained model, then start the webcam loop.
# NOTE(review): per the hunk marker above this chunk, these lines sit inside
# the `if __name__ == '__main__':` guard — confirm indentation against the
# full file. The diff residue read the model twice (old basis_data/ path,
# new resources/ path) and called show_webcam_and_run twice; the new path
# and a single call are kept.
fisher_face.read('resources/models/detection_model.xml')
show_webcam_and_run(fisher_face, emoticons)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment