# ***************************************************
# * Copyright © 2010-2011 Tjado Ihmels <tjado.ihmels@uni-oldenburg.de>
# *
# * This file is part of whiteboard-project-matcher.
# *
# * whiteboard-project-matcher can not be copied and/or distributed without the express
# * permission of Tjado Ihmels
# *
# * Parts of this program are legally copied and adapted from
# * van Gent, P. (2016). Emotion Recognition With Python, OpenCV and a Face Dataset. A tech blog about fun things with Python and embedded electronics. Retrieved from:
# * http://www.paulvangent.com/2016/04/01/emotion-recognition-with-python-opencv-and-a-face-dataset/
# ***************************************************

import argparse
import glob
import logging
import random
import sys

import cv2
import numpy as np

from email_service import sendMail

# This program takes the pictures from the dataset and generates a trained model
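# Rough pipeline: collect the labelled face images per emotion, preprocess them
# (grayscale + CLAHE), train the OpenCV Fisherface recognizer, and log the
# recognition rate over one or more iterations.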

logfile = 'logs/process_model.log'

# Creates and configures the logger
logging.basicConfig(level=logging.NOTSET, format='%(asctime)s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M',
                    filename=logfile)

# The argument parser allows program parameters to be specified
parser = argparse.ArgumentParser(description='Process Model Application')
parser.add_argument('-0', action='append_const', dest='emotions', const='neutral', help='neutral')
parser.add_argument('-1', action='append_const', dest='emotions', const='happy', help='happy')
parser.add_argument('-2', action='append_const', dest='emotions', const='sadness', help='sadness')
parser.add_argument('-3', action='append_const', dest='emotions', const='surprise', help='surprise')
parser.add_argument('-4', action='append_const', dest='emotions', const='fear', help='fear')
parser.add_argument('-5', action='append_const', dest='emotions', const='disgust', help='disgust')
parser.add_argument('-6', action='append_const', dest='emotions', const='anger', help='anger')
parser.add_argument('-d', '--dataset', action='store', dest='dataset', default='resources/img_data/dataset/',
                    help='path to dataset')
parser.add_argument('-r', '--ratio', action='store', dest='ratio', type=float, default=1,
                    help='the relative size of the training set as a float in [0.1, 1]')
parser.add_argument('-i', '--iterations', action='store', dest='iterations', type=int, default=1,
                    help='number of iterations')
parser.add_argument('-p', '--properties', nargs='*', dest='properties', help='pre-processing steps for logging')
parser.add_argument('-t', '--test', action='store_true', dest='test', help='prevent writing the new model to disk')
parser.add_argument('-c', '--csv', action='store_true', dest='csv', help='activate csv output')
parser.add_argument('-x', '--email', action='store_true', dest='email', help='activate email notifications')
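
# Example invocation (hypothetical parameter values, including the -p labels):
#   python process_model.py -0 -1 -6 -r 0.8 -i 5 -p clahe grayscale -c
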
arguments = parser.parse_args()
logging.debug(arguments)

if not arguments.emotions:
    print('No emotions declared')
    sys.exit()

logging.info('Fisherface training started')

def _get_faces_from_emotion(emotion):
    """
    Collects all files for an emotion from the dataset, shuffles them, and splits them
    into a training and a prediction set.
    :param emotion: the emotion to collect files for
    :return: training, prediction
    """
    files = glob.glob(arguments.dataset + '{}/*'.format(emotion))
    random.shuffle(files)

    # Split the shuffled files into a training and a prediction set in the given ratio
    cut = int(len(files) * arguments.ratio)
    training = files[:cut]
    prediction = files[cut:]

    return training, prediction


def image_preprocessing(image):
    """
    Preprocesses a single image: loads it, converts it to grayscale, and applies CLAHE.
    :param image: path to the picture
    :return: the preprocessed face image
    """
    img = cv2.imread(image)  # open image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
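    # CLAHE (Contrast Limited Adaptive Histogram Equalization) equalizes contrast in
    # small tiles of the image, which reduces the influence of uneven lighting.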
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    face = clahe.apply(gray)
    return face


def make_sets():
    """
    Creates the training and prediction sets for all selected emotions.
    :return: training_data, training_labels, prediction_data, prediction_labels
    """
    training_data = []
    training_labels = []
    prediction_data = []
    prediction_labels = []
    for emotion in arguments.emotions:
        training, prediction = _get_faces_from_emotion(emotion)
        # Append data to the training and prediction lists and use the emotion's index as numeric label
        for item in training:
            img = image_preprocessing(item)
            # append image array to training data list
            training_data.append(img)
            training_labels.append(arguments.emotions.index(emotion))

        # repeat above process for prediction set
        for item in prediction:
            img = image_preprocessing(item)
            prediction_data.append(img)
            prediction_labels.append(arguments.emotions.index(emotion))

    return training_data, training_labels, prediction_data, prediction_labels


def run_recognizer():
    """
    Performs the actual training with Fisherfaces and logs the results.
    :return: the percentage of correctly recognized faces
    """
    training_data, training_labels, prediction_data, prediction_labels = make_sets()

    logging.debug('Training...')
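    # OpenCV's train() expects the labels as an integer array rather than a plain
    # Python list, which is presumably why np.asarray is applied below.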
    fishface.train(training_data, np.asarray(training_labels))

    correct = 0
    incorrect = 0
    logging.debug('Prediction...')
    for cnt, image in enumerate(prediction_data):
        pred, conf = fishface.predict(image)
        if pred == prediction_labels[cnt]:
            correct += 1
        else:
            incorrect += 1
    return (100 * correct) / (correct + incorrect)


# Create the Fisherface recognizer
fishface = cv2.face.FisherFaceRecognizer_create()
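# Note: Fisherfaces performs a discriminant analysis (LDA) across the emotion
# classes, so the recognizer needs training images from at least two emotions.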
metascore = []

# Run the recognizer for the requested number of iterations
for i in range(1, arguments.iterations + 1):
    correct = run_recognizer()
    logging.info("{} : {}%".format(i, int(correct)))
    metascore.append(correct)

# Write the score of each iteration to a CSV file if requested (the file name is built from the --properties labels)
if arguments.csv:
    with open("resources/csv/{}.csv".format('_'.join(arguments.properties).lower()), "w") as file:
        for entry in metascore:
            file.write("{}\n".format(int(entry)))

logging.info("Fisherface training finished - {}% average\n".format(np.mean(metascore)))

# Persist the trained model unless running in test mode
if not arguments.test:
    fishface.write('resources/models/detection_model.xml')

# Send an email notification with the trained model attached if requested
if arguments.email:
    sendMail('Fisherface training finished', body=str(arguments), filepath='resources/models/detection_model.xml')