Commit 1d81154b authored by tihmels

Refactoring

parent bca0c598
@@ -5,13 +5,6 @@ from shutil import copyfile
import pandas as pd
-logfile = 'affectnet.log'
-# Creates and configures the log file
-logging.basicConfig(level=logging.NOTSET, format='%(asctime)s %(levelname)-8s %(message)s',
-                    datefmt='%m-%d %H:%M',
-                    filename=logfile)
parser = argparse.ArgumentParser(description='AffectNet PrePipeline')
parser.add_argument('csv', action='store', help='the .csv file to process')
parser.add_argument('source', action='store', help='image source folder')
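# Editor's illustration (not part of the commit; the CSV name and source folder are
# assumptions): with the two positional arguments above the script would be run roughly as
#   $ python affectnet.py training.csv resources/img_data/affectnet_raw/
# reading the annotation CSV with pandas and copying the referenced images out of the
# source folder with copyfile.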
...
Dataset Readme
"""""""""""""""""
-The files *affectnet.py*, *cohn_kanande.py* and *fer2013.py* are used for pre-processing the respective databases,
-for transferring the image data and metadata into an infrastructure suited to our project.
+The files *affectnet.py*, *cohn_kanande.py* and *fer2013.py* are used for pre-processing the respective databases and
+for transferring the image data and metadata into an infrastructure suited to our project.
\ No newline at end of file
@@ -24,15 +24,20 @@ The argument parser allows processing parameters to be specified.
"""
parser = argparse.ArgumentParser(description='Process Model Application')
-parser.add_argument('--dataset', action='store', dest='dataset', default='resources/img_data/dataset/',
-                    help='path to dataset')
-parser.add_argument('-i', action='store', dest='iterations', type=int, default=30, help='number of iterations')
-parser.add_argument('-e', action='append', dest='emotions', default=['happy', 'neutral', 'sadness', 'surprise'],
-                    help='declare emotions that should be processed')
-parser.add_argument('-p', action='append', dest='properties', help='pre-processing steps for logging')
-parser.add_argument('--test', action='store_true', help='prevent writing new model to classifier')
-parser.add_argument('--csv', action='store_true', help='activate csv processing')
-parser.add_argument('--email', action='store_true', help='activate email notifications')
+parser.add_argument('-0', action='append_const', dest='emotions', const='neutral', help='neutral')
+parser.add_argument('-1', action='append_const', dest='emotions', const='happy', help='happy')
+parser.add_argument('-2', action='append_const', dest='emotions', const='sadness', help='sadness')
+parser.add_argument('-3', action='append_const', dest='emotions', const='surprise', help='surprise')
+parser.add_argument('-4', action='append_const', dest='emotions', const='fear', help='fear')
+parser.add_argument('-5', action='append_const', dest='emotions', const='disgust', help='disgust')
+parser.add_argument('-6', action='append_const', dest='emotions', const='anger', help='anger')
+parser.add_argument('-d', '--dataset', action='store', dest='dataset', default='resources/img_data/dataset/',
+                    help='path to dataset')
+parser.add_argument('-i', '--iterations', action='store', dest='iterations', type=int, default=30, help='number of iterations')
+parser.add_argument('-p', '--properties', nargs='+', dest='properties', help='pre-processing steps for logging')
+parser.add_argument('-t', '--test', action='store_true', help='prevent writing new model to classifier')
+parser.add_argument('-c', '--csv', action='store_true', help='activate csv processing')
+parser.add_argument('-x', '--email', action='store_true', help='activate email notifications')
arguments = parser.parse_args()
logging.debug(arguments)
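# Editor's sketch (not part of the commit): because every numeric flag uses
# append_const with dest='emotions', the flags passed on the command line are
# collected into a single list, in the order given:
demo = parser.parse_args(['-0', '-1', '-3'])
assert demo.emotions == ['neutral', 'happy', 'surprise']
# With no emotion flag at all the attribute stays None, so callers may want a
# fallback such as `arguments.emotions or ['happy', 'neutral', 'sadness', 'surprise']`.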
...
@@ -19,19 +19,24 @@ logging.basicConfig(level=logging.NOTSET, format='%(asctime)s %(name)-12s %(leve
The argument parser allows processing parameters to be specified.
"""
-parser = argparse.ArgumentParser(description='Sorted Set Face Creator Application')
-parser.add_argument('--source', action='store', dest='img_source', default='resources/img_data/sorted_set/',
-                    help='path to image source')
-parser.add_argument('--dataset', action='store', dest='dataset', default='resources/img_data/dataset/',
-                    help='path to dataset')
-parser.add_argument('-r', action='store', dest='resize', default=150, type=int, help='resize factor')
-parser.add_argument('-e', action='append', dest='emotions', default=['happy', 'neutral', 'surprise'],
-                    help='declare emotions that should be processed')
-parser.add_argument('-c', action='store', dest='scaleFactor', default=1.1, type=float,
-                    help='scale factor - haar')
-parser.add_argument('-n', action='store', dest='minNeighbors', default=6, type=int, help='min neighbors - haar')
-parser.add_argument('-s', action='store', dest='minSize', default=40, type=int, help='min size - haar')
-parser.add_argument('--email', action='store_true', help='activate email notifications')
+parser = argparse.ArgumentParser(description='Dataset Preprocessor')
+parser.add_argument('-0', action='append_const', dest='emotions', const='neutral', help='neutral')
+parser.add_argument('-1', action='append_const', dest='emotions', const='happy', help='happy')
+parser.add_argument('-2', action='append_const', dest='emotions', const='sadness', help='sadness')
+parser.add_argument('-3', action='append_const', dest='emotions', const='surprise', help='surprise')
+parser.add_argument('-4', action='append_const', dest='emotions', const='fear', help='fear')
+parser.add_argument('-5', action='append_const', dest='emotions', const='disgust', help='disgust')
+parser.add_argument('-6', action='append_const', dest='emotions', const='anger', help='anger')
+parser.add_argument('-s', '--source', action='store', dest='img_source', default='resources/img_data/sorted_set/',
+                    help='path to image source')
+parser.add_argument('-d', '--dataset', action='store', dest='dataset', default='resources/img_data/dataset/',
+                    help='path to dataset')
+parser.add_argument('-r', '--resize', action='store', dest='resize', default=250, type=int, help='resize factor')
+parser.add_argument('-c', '--scale', action='store', dest='scaleFactor', default=1.1, type=float,
+                    help='scale factor (haar)')
+parser.add_argument('-n', '--minneighbors', action='store', dest='minNeighbors', default=6, type=int, help='min neighbors (haar)')
+parser.add_argument('-m', '--minsize', action='store', dest='minSize', default=50, type=int, help='min size (haar)')
+parser.add_argument('-x', '--email', action='store_true', help='activate email notifications')
arguments = parser.parse_args()
logging.debug(arguments)
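# Editor's sketch (assumption, not part of the commit): scaleFactor, minNeighbors
# and minSize are the standard knobs of OpenCV's Haar cascade face detector and
# would be consumed elsewhere in the pipeline roughly like this:
import cv2
_haar = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
def _sketch_find_faces(gray):
    # a larger scaleFactor scans a coarser image pyramid (faster, less exact),
    # a higher minNeighbors rejects more false positives, minSize drops tiny hits
    return _haar.detectMultiScale(gray, scaleFactor=arguments.scaleFactor,
                                  minNeighbors=arguments.minNeighbors,
                                  minSize=(arguments.minSize, arguments.minSize))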
@@ -44,21 +49,22 @@ minNeighbors = arguments.minNeighbors
minSize = arguments.minSize
email = arguments.email
-if len(glob.glob(dataset_path + '*')) > 0:
-    deleteDataset = input(
-        'The dataset already contains files. This operation will delete the existing data. Continue (y/n): ')
-    if deleteDataset == 'y':
-        for file in glob.glob(dataset_path + '*'):
-            shutil.rmtree(file)
-    else:
-        sys.exit()
+if len(os.listdir(dataset_path)) > 0:
+    delete_data = input(
+        'The dataset already contains files. This operation will delete the existing data. Continue (y/n): ')
+    if delete_data == 'y':
+        dir = [f for f in os.listdir(dataset_path) if os.path.isdir(os.path.join(dataset_path, f))]
+        for d in dir:
+            shutil.rmtree(os.path.join(dataset_path, d))
+    else:
+        print('process aborted by user')
+        sys.exit()
totalFiles: int = 0
totalFaces: int = 0
undetected: list = []
def detect_faces(emotion):
    """
    Fetches all files for one emotion from the sorted_set
...
@@ -10,27 +10,24 @@ from RingBuffer import RingBuffer
from WebcamVideoStream import WebcamVideoStream
from face_detect import extract_faces
from image_commons import nparray_as_image, draw_with_alpha
+import os
-parser = argparse.ArgumentParser(description='ProjectMood Camplication')
-parser.add_argument('-b', action='store', dest='buffer', default=10, type=int, help='size of ringbuffer')
-parser.add_argument('-e', action='append', dest='emotions', default=['happy', 'neutral', 'sadness', 'surprise'],
-                    help='declare emotions that should be detected')
-parser.add_argument('-r', action='append', dest='resize', default=150, help='resize factor')
-parser.add_argument('--model', action='store', dest='model', default='resources/models/detection_model.xml',
+parser = argparse.ArgumentParser(description='ProjectMood Emotion Detection')
+parser.add_argument('-b', '--buffer', action='store', dest='buffer', default=12, type=int, help='size of ringbuffer')
+parser.add_argument('-m', '--model', action='store', dest='model', default='resources/models/detection_model.xml',
                    help='path to model')
arguments = parser.parse_args()
def _load_emoticons(emotions):
    """
    Loads the emoticons from the emojis folder.
    :param emotions: array of emotions.
    :return: array of emotion graphics.
    """
-    return [nparray_as_image(cv2.imread('resources/emojis/%s.png' % emotion, -1), mode=None) for emotion in emotions]
+    return [nparray_as_image(cv2.imread('resources/emojis/{}.png'.format(emotion), -1), mode=None) for emotion in emotions]
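# Editor's note: the -1 flag is cv2.IMREAD_UNCHANGED, so the emoji PNGs keep their
# alpha channel and can later be blended over the camera frame via draw_with_alpha.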
-def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name='Project Mood', update_time=1):
+def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=parser.description, update_time=1):
""" """
Zeigt ein Webcam-Bild, erkennt Gesichter und Emotionen in Echtzeit und zeichnet Emoticons neben die Gesichter. Zeigt ein Webcam-Bild, erkennt Gesichter und Emotionen in Echtzeit und zeichnet Emoticons neben die Gesichter.
:param model: Trainiertes Model :param model: Trainiertes Model
@@ -45,6 +42,10 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name='P
    vc = WebcamVideoStream().start()
+    # a random image from the dataset to determine the image format (important for Fisherface)
+    random = cv2.imread('resources/img_data/dataset/{}/0.jpg'.format(emotions[0]))
+    resizefactor = np.size(random, 0)
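    # Editor's note: OpenCV's Fisherface recognizer only accepts probe images with the
    # same dimensions as its training samples, which is why a sample image from the
    # dataset is used here to look up the expected size.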
""" """
Der RingBuffer speichert die letzten x Predictions Der RingBuffer speichert die letzten x Predictions
""" """
@@ -53,7 +54,7 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name='P
    frame = vc.read()
    while True:
-        for normalized_face in extract_faces(frame, arguments.resize):
+        for normalized_face in extract_faces(frame, resizefactor):
            prediction = model.predict(normalized_face)  # do prediction
            """
@@ -93,11 +94,13 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name='P
if __name__ == '__main__':
-    emotions = arguments.emotions
+    # The emotions present in the dataset folder should also be available in the application
+    _, emotions, _ = next(os.walk('resources/img_data/dataset'), (None, [], None))
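    # Editor's note: next(os.walk(path)) yields a (dirpath, dirnames, filenames) tuple,
    # so the middle element is the list of emotion sub-folders; the (None, [], None)
    # default leaves 'emotions' empty when the dataset folder is missing.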
    emoticons = _load_emoticons(emotions)
    fisher_face = cv2.face.FisherFaceRecognizer_create()
"""Lädt das trainierte Model""" # Lädt das trainierte Model
    fisher_face.read(arguments.model)
    show_webcam_and_run(fisher_face, emoticons)
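# Example invocation (editor's illustration; the script's file name is not shown in this diff):
#   $ python webcam.py -b 12 -m resources/models/detection_model.xml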