import cv2
import glob
import random
import numpy as np
import sys
import logging

logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M',
                    filename='logs/process_model.log')

args = sys.argv  # read input parameters
logging.debug('Fisherface training initialized')

results_file = open("{}.csv".format('_'.join(args[1:]).lower()), "w")


def _get_faces_from_emotion(emotion):
    """
    Fetches all files for an emotion from the dataset, shuffles them and
    splits them into a training and a prediction set.
    :param emotion: the emotion
    :return: training, prediction
    """
    files = glob.glob('basis_data/dataset/{}/*'.format(emotion))
    random.shuffle(files)
    training = files[:int(len(files) * 0.8)]     # first 80% of the file list
    prediction = files[-int(len(files) * 0.2):]  # last 20% of the file list
    return training, prediction


def image_preprocessing(image):
    """
    Preprocesses an image file.
    :param image: path to the image
    :return: blurred grayscale image
    """
    img = cv2.imread(image)                       # open image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    return blur


def make_sets():
    training_data = []
    training_labels = []
    prediction_data = []
    prediction_labels = []
    for emotion in emotions:
        training, prediction = _get_faces_from_emotion(emotion)
        # Append data to training and prediction lists and generate labels
        # matching the index of the emotion in the emotions list
        for item in training:
            img = image_preprocessing(item)
            training_data.append(img)  # append image array to training data list
            training_labels.append(emotions.index(emotion))
        for item in prediction:  # repeat the above process for the prediction set
            img = image_preprocessing(item)
            prediction_data.append(img)
            prediction_labels.append(emotions.index(emotion))
    return training_data, training_labels, prediction_data, prediction_labels


def run_recognizer():
    training_data, training_labels, prediction_data, prediction_labels = make_sets()
    fishface.train(training_data, np.asarray(training_labels))
    cnt = 0
    correct = 0
    incorrect = 0
    for image in prediction_data:
        pred, conf = fishface.predict(image)
        if pred == prediction_labels[cnt]:
            correct += 1
        else:
            incorrect += 1
        cnt += 1
    return (100 * correct) / (correct + incorrect)


if len(args) > 1:
    tags = ', '.join(args[1:])
    logging.debug(tags.upper())

emotions = ["happy", "neutral", "surprise"]  # emotion list
fishface = cv2.face.FisherFaceRecognizer_create()

# Now run it
metascore = []
for i in range(0, 10):
    correct = run_recognizer()
    results_file.write("{}\n".format(int(correct)))
    logging.debug("{} : {}%".format(i, int(correct)))
    metascore.append(correct)

results_file.close()
logging.debug("{} iterations - {}% average\n".format(len(metascore), np.mean(metascore)))
fishface.write('basis_data/models/detection_model.xml')
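
# A minimal sketch of how the saved model could be reloaded elsewhere.
# This is an assumption for illustration (the file name 'some_face.png' is
# hypothetical); it relies on OpenCV's contrib "face" module and expects a
# preprocessed grayscale image of the same size as the training images:
#
#   model = cv2.face.FisherFaceRecognizer_create()
#   model.read('basis_data/models/detection_model.xml')
#   label, confidence = model.predict(image_preprocessing('some_face.png'))
#   print(emotions[label], confidence)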