Commit 95a0fb14 authored by Arne Gerdes

Added Classifier folder for testing face recognition with machine learning

parent 88e6d172
# The emotion face detection script
# You can modify this script as you wish
import cv2
import glob as gb
import random
import numpy as np
# Emotion list
emojis = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"]
# Initialize the Fisher Face classifier (OpenCV 2.4 API; see the OpenCV 3+ sketch below)
fisher_face = cv2.createFisherFaceRecognizer()
data = {}
# Function definition to get the file list for an emotion, shuffle it and split it 67/33
def getFiles(emotion):
    files = gb.glob("final_dataset\\%s\\*" % emotion)
    random.shuffle(files)
    training = files[:int(len(files) * 0.67)]  # first 67% of the file list
    prediction = files[-int(len(files) * 0.33):]  # last 33% of the file list
    return training, prediction
def makeTrainingAndValidationSet():
    training_data = []
    training_labels = []
    prediction_data = []
    prediction_labels = []
    for emotion in emojis:
        training, prediction = getFiles(emotion)
        # Append data to the training and prediction lists and generate labels 0-7
        for item in training:
            image = cv2.imread(item)  # open image
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to grayscale
            training_data.append(gray)  # append image array to training data list
            training_labels.append(emojis.index(emotion))
        for item in prediction:  # repeat the process for the prediction set
            image = cv2.imread(item)
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            prediction_data.append(gray)
            prediction_labels.append(emojis.index(emotion))
    return training_data, training_labels, prediction_data, prediction_labels
def runClassifier():
    training_data, training_labels, prediction_data, prediction_labels = makeTrainingAndValidationSet()
    print("training fisher face classifier using the training data")
    print("size of training set is:", len(training_labels), "images")
    fisher_face.train(training_data, np.asarray(training_labels))
    print("classification prediction")
    counter = 0
    right = 0
    wrong = 0
    for image in prediction_data:
        pred, conf = fisher_face.predict(image)
        if pred == prediction_labels[counter]:
            right += 1
        else:
            wrong += 1
        counter += 1
    return (100 * right) / (right + wrong)
# Now run the classifier ten times and average the accuracy
metascore = []
for i in range(0, 10):
    right = runClassifier()
    print("got", right, "percent right!")
    metascore.append(right)
print("\n\nend score:", np.mean(metascore), "percent right!")
import cv2
import glob as gb
face_detector1 = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
face_detector2 = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
face_detector3 = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
face_detector4 = cv2.CascadeClassifier("haarcascade_frontalface_alt_tree.xml")
emotion_list = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"]
def faceDetection(emotion):
    files = gb.glob("selected_set\\%s\\*" % emotion)  # get the list of all images with this emotion
    filenumber = 0
    for f in files:
        frame = cv2.imread(f)  # open image
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert image to grayscale
        # Detect the face using 4 different classifiers
        face1 = face_detector1.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10,
                                                minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
        face2 = face_detector2.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10,
                                                minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
        face3 = face_detector3.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10,
                                                minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
        face4 = face_detector4.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10,
                                                minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
        # Take the first classifier that finds exactly one face; empty list if none does
        if len(face1) == 1:
            facefeatures = face1
        elif len(face2) == 1:
            facefeatures = face2  # fixed: was `facefeatures == face2`, a comparison with no effect
        elif len(face3) == 1:
            facefeatures = face3
        elif len(face4) == 1:
            facefeatures = face4
        else:
            facefeatures = []
        # Cut out and save the face
        for (x, y, w, h) in facefeatures:  # coordinates and size of the rectangle containing the face
            print("face found in file: %s" % f)
            gray = gray[y:y + h, x:x + w]  # cut the frame to size
            try:
                out = cv2.resize(gray, (350, 350))  # resize so all images have the same size
                cv2.imwrite("final_dataset\\%s\\%s.jpg" % (emotion, filenumber), out)  # write image
            except Exception:
                pass  # skip the file on error
        filenumber += 1  # increment image number
if __name__ == '__main__':
    for emotion in emotion_list:
        faceDetection(emotion)  # call our face detection module
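
The script above assumes the four Haar cascade XML files sit in the working directory. If they are not copied there, the cascades bundled with the opencv-python wheels can be used instead; a minimal sketch, assuming a reasonably recent opencv-python build:

import cv2
import os

# Directory of the cascade files shipped with the opencv-python wheels
cascade_dir = cv2.data.haarcascades
face_detector1 = cv2.CascadeClassifier(
    os.path.join(cascade_dir, "haarcascade_frontalface_default.xml"))
# CascadeClassifier does not raise on a missing file, so check explicitly
assert not face_detector1.empty(), "cascade failed to load"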
import glob as gb
from shutil import copyfile
emotions_list = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"]
emotions_folders = gb.glob("emotions\\*") # Returns a list of all folders with participant numbers
def imageWithEmotionExtraction():
    for x in emotions_folders:
        participant = "%s" % x[-4:]  # store the current participant number
        for sessions in gb.glob("%s\\*" % x):
            for files in gb.glob("%s\\*" % sessions):
                current_session = files[20:-30]  # session id sliced out of the path (depends on the fixed folder layout)
                with open(files, 'r') as file:
                    emotion = int(float(file.readline()))
                # path of the last image in the sequence, which contains the emotion
                sourcefile_emotion = gb.glob("images\\%s\\%s\\*" % (participant, current_session))[-1]
                # the first image in the sequence is the neutral face
                sourcefile_neutral = gb.glob("images\\%s\\%s\\*" % (participant, current_session))[0]
                # generate the destination path for the neutral image
                dest_neut = "selected_set\\neutral\\%s" % sourcefile_neutral[25:]
                # and for the image containing the emotion
                dest_emot = "selected_set\\%s\\%s" % (emotions_list[emotion], sourcefile_emotion[25:])
                copyfile(sourcefile_neutral, dest_neut)  # copy file
                copyfile(sourcefile_emotion, dest_emot)  # copy file
if __name__ == '__main__':
    imageWithEmotionExtraction()
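
One caveat: copyfile() raises FileNotFoundError when a destination folder does not exist, so the selected_set tree has to be created before this sorter runs. A minimal setup sketch, assuming the same folder layout the scripts above use:

import os

# Create selected_set\<emotion> for every label before copying any files
for emotion in ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"]:
    os.makedirs(os.path.join("selected_set", emotion), exist_ok=True)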
@@ -12,11 +12,11 @@ cap = cv2.VideoCapture(0)
 while True:
-    # Read the Video
-    ret, img = cap.read()
+    # Capture frame-by-frame
+    ret, frame = cap.read(100)
     # convert the video to gray scale
-    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
     # Contrast Limited Adaptive Histogram Equalization
     clahe = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8, 8))
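
The hunk stops at the CLAHE setup; equalization itself happens when the CLAHE object is applied to the grayscale frame. A minimal sketch of that next step, assuming the gray variable from the diff context:

# Apply Contrast Limited Adaptive Histogram Equalization to the gray frame
clahe = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8, 8))
equalized = clahe.apply(gray)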