Commit 4328dc1b authored by tihmels's avatar tihmels

Logs verbessert

parent ae8279ce
......@@ -2,24 +2,27 @@ import cv2
import glob
import random
import numpy as np
import logging
import sys
import signal
import logging
# Configure root logging once for the whole training run; the log file is
# appended to across runs (logs/ must already exist or basicConfig fails
# silently to create the handler — TODO confirm logs/ is created beforehand).
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M',
                    filename='logs/process_model.log')

# Command-line arguments; tags[1:] (if any) are free-form labels echoed to the log.
tags = sys.argv

logging.debug('Fisherface training initialized')

emotions = ["anger", "disgust", "happy", "neutral", "surprise"]  # Emotion list; index = numeric class label
fishface = cv2.face.FisherFaceRecognizer_create()  # shared recognizer instance trained in run_recognizer()
def get_files_from_emotion(emotion):
    """Return a shuffled 80/20 (training, prediction) split of the files for *emotion*.

    Files are read from basis_data/dataset/<emotion>/.

    Bug fix: the old code computed the prediction set as
    files[-int(len(files) * 0.2):], which returns the ENTIRE list whenever
    int(len(files) * 0.2) == 0 (fewer than 5 files), overlapping the training
    set, and could drop a file when the two truncations did not partition the
    list (e.g. 9 files -> 7 + 1). Using a single split index guarantees the
    two slices are a disjoint, exhaustive partition.
    """
    files = glob.glob('basis_data/dataset/%s/*' % emotion)
    random.shuffle(files)
    split = int(len(files) * 0.8)       # first 80% train, remainder predict
    training = files[:split]
    prediction = files[split:]          # exact complement: no overlap, no dropped files
    return training, prediction
def image_preprocessing(item):
    """Load the image at path *item* and return it grayscaled and histogram-equalized."""
    raw = cv2.imread(item)                               # read image (BGR) from disk
    grayscale = cv2.cvtColor(raw, cv2.COLOR_BGR2GRAY)    # collapse to a single channel
    return cv2.equalizeHist(grayscale)                   # spread the intensity histogram
def make_sets():
    """Build preprocessed image/label lists for training and prediction.

    Iterates over every emotion, splits its files 80/20, preprocesses each
    image, and tags it with the emotion's numeric label (its index in the
    global ``emotions`` list).

    Returns:
        (training_data, training_labels, prediction_data, prediction_labels)
    """
    training_data, training_labels = [], []
    prediction_data, prediction_labels = [], []
    for emotion in emotions:
        training, prediction = get_files_from_emotion(emotion)
        label = emotions.index(emotion)  # numeric class id for this emotion
        for item in training:
            training_data.append(image_preprocessing(item))
            training_labels.append(label)
        for item in prediction:  # same preprocessing for the held-out set
            prediction_data.append(image_preprocessing(item))
            prediction_labels.append(label)
    return training_data, training_labels, prediction_data, prediction_labels
def run_recognizer():
training_data, training_labels, prediction_data, prediction_labels = make_sets()
print("training fisher face classifier")
print("size of training set is:", len(training_labels), "images")
fishface.train(training_data, np.asarray(training_labels))
print("predicting classification set")
cnt = 0
correct = 0
incorrect = 0
......@@ -66,22 +63,20 @@ def run_recognizer():
cnt += 1
return ((100 * correct) / (correct + incorrect))
# --- Driver script ---------------------------------------------------------
# Echo any extra command-line tags into the log so individual runs can be
# identified later.
if len(tags) > 1:
    message = ', '.join(tags[1:])
    logging.debug(message.upper())

# Run the full train/predict cycle ten times and average the accuracy,
# since each run uses a fresh random 80/20 split.
metascore = []
for i in range(0, 10):
    correct = run_recognizer()
    print("got", correct, "percent correct!")
    logging.debug("{} : {}%".format(i, int(correct)))
    metascore.append(correct)

print("\nend score:", np.mean(metascore), "percent correct!")
logging.info("finished {} iterations:".format(len(metascore)))
for idx, meta in enumerate(metascore):
    logging.debug("{} : {}%".format(idx, meta))
logging.debug("{} iterations - {}% average\n".format(len(metascore), np.mean(metascore)))

# Persist the model trained in the final iteration.
fishface.write('basis_data/models/detection_model.xml')
......@@ -46,7 +46,6 @@ def show_webcam_and_run(model, emoticons, window_size=None, window_name='webcam'
while read_value:
for normalized_face, (x, y, w, h) in find_faces(webcam_image):
prediction = model.predict(normalized_face) # do prediction
cv2.imshow("Face", normalized_face)
image_to_draw = emoticons[(prediction[0])]
if x - 150 > 0 and y - 50 > 0 and w - 150 > 0 and h - 150 > 0:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment