Commit 9870a0ec authored by Arne Gerdes

All classes except the dataset

parent 4dc49d76
import cv2
def createwindow(name, view, x=70, y=70, w=150, h=150):
    """Creates a resizable OpenCV window named `name` at (x, y) with size (w, h) and shows the image `view`."""
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(name, w, h)
    cv2.moveWindow(name, x, y)
    cv2.imshow(name, view)
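# A minimal usage sketch for createwindow (the image path here is hypothetical
# and not part of this commit); the experiment script below uses it the same way:
#
#   img = cv2.imread('resources/experiments/example.jpg')  # assumed path
#   createwindow('Example', img, x=0, y=0, w=300, h=300)
#   cv2.waitKey(0)
#   cv2.destroyAllWindows()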
import cv2
import cvhelper
import numpy as np
from matplotlib import pyplot as plt
original = cv2.imread('resources/experiments/trump.jpg')
cvhelper.createwindow("Original", original, 0, 0)
gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
hsv = cv2.cvtColor(original, cv2.COLOR_BGR2HSV)
ret,thresh1 = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
cvhelper.createwindow("Gray", gray, 100)
cvhelper.createwindow("HSV", hsv, 200)
cvhelper.createwindow("Threshold", thresh1, 300)
plt.hist(gray.ravel(), 256, [0,256])
plt.show()
cv2.waitKey(0)  # wait for any key press, then close all windows
cv2.destroyAllWindows()
import cv2
import sys
import dlib
import numpy as np
import cvhelper
def main():
# Create the haar cascade
detector = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')
# Create the landmark predictor
predictor = dlib.shape_predictor("resources/shape_predictor_68_face_landmarks.dat")
cap = cv2.VideoCapture(0)
while True:
# Read the Video
ret, img = cap.read()
# convert the video to gray scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Contrast Limited Adaptive Histogram Equalization
clahe = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8, 8))
clahe_image = clahe.apply(gray)
# Detect faces in the video
faces = detector.detectMultiScale(
clahe_image,
scaleFactor=1.05,
minNeighbors=5,
minSize=(100, 100),
)
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(clahe_image, (x, y), (x + w, y + h), (255, 0, 0), 2)
# Converting the OpenCV rectangle coordinates to Dlib rectangle
dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
# use that rectangle as the bounding box to detect the face landmarks,
# and extract out the coordinates of the landmarks so OpenCV can use them
detected_landmarks = predictor(clahe_image, dlib_rect).parts()
landmarks = np.matrix([[p.x, p.y] for p in detected_landmarks])
# enumerate through the landmark coordinates and mark them on the image
for idx, point in enumerate(landmarks):
pos = (point[0, 0], point[0, 1])
# annotate the positions
cv2.putText(clahe_image, str(idx), pos,
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.4,
color=(0, 0, 255))
# draw points on the landmark positions
cv2.circle(clahe_image, pos, 3, color=(0, 255, 255))
# draw the annotated image on an OpenCV window
cvhelper.createwindow('Mood', clahe_image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
import cv2
import dlib
import numpy as np
# Create the haar cascade
detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# create the landmark predictor
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
cap = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
    ret, frame = cap.read()
# convert the video to gray scale
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Contrast Limited Adaptive Histogram Equalization
clahe = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8, 8))
clahe_image = clahe.apply(gray)
# Detect faces in the video
faces = detector.detectMultiScale(
clahe_image,
scaleFactor=1.05,
minNeighbors=5,
minSize=(100, 100),
)
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(clahe_image, (x, y), (x + w, y + h), (255, 0, 0), 2)
# Converting the OpenCV rectangle coordinates to Dlib rectangle
dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
# use that rectangle as the bounding box to detect the face landmarks,
# and extract out the coordinates of the landmarks so OpenCV can use them
detected_landmarks = predictor(clahe_image, dlib_rect).parts()
landmarks = np.matrix([[p.x, p.y] for p in detected_landmarks])
# enumerate through the landmark coordinates and mark them on the image
for idx, point in enumerate(landmarks):
pos = (point[0, 0], point[0, 1])
# annotate the positions
cv2.putText(clahe_image, str(idx), pos,
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.4,
color=(0, 0, 255))
# draw points on the landmark positions
cv2.circle(clahe_image, pos, 3, color=(0, 255, 255))
# draw the annotated image on an OpenCV window
cv2.imshow('Window', clahe_image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
"""
This module contains face detection functions.
"""
import cv2
faceDet = cv2.CascadeClassifier("Haarcascade\\haarcascade_frontalface_default.xml")
faceDet_two = cv2.CascadeClassifier("Haarcascade\\haarcascade_frontalface_alt2.xml")
faceDet_three = cv2.CascadeClassifier("Haarcascade\\haarcascade_frontalface_alt.xml")
faceDet_four = cv2.CascadeClassifier("Haarcascade\\haarcascade_frontalface_alt_tree.xml")
def find_faces(image):
faces_coordinates = _locate_faces(image)
cutted_faces = [image[y:y + h, x:x + w] for (x, y, w, h) in faces_coordinates]
normalized_faces = [_normalize_face(face) for face in cutted_faces]
return zip(normalized_faces, faces_coordinates)
def _normalize_face(face):
face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
face = cv2.resize(face, (250, 250))
return face
def _locate_faces(image):
face_one = faceDet.detectMultiScale(image, scaleFactor=1.1, minNeighbors=15, minSize=(70, 70),
flags=cv2.CASCADE_SCALE_IMAGE)
face_two = faceDet_two.detectMultiScale(image, scaleFactor=1.1, minNeighbors=15, minSize=(70, 70),
flags=cv2.CASCADE_SCALE_IMAGE)
face_three = faceDet_three.detectMultiScale(image, scaleFactor=1.1, minNeighbors=15, minSize=(70, 70),
flags=cv2.CASCADE_SCALE_IMAGE)
face_four = faceDet_four.detectMultiScale(image, scaleFactor=1.1, minNeighbors=15, minSize=(70, 70),
flags=cv2.CASCADE_SCALE_IMAGE)
if len(face_one) == 1:
facefeatures = face_one
elif len(face_two) == 1:
facefeatures = face_two
elif len(face_three) == 1:
facefeatures = face_three
elif len(face_four) == 1:
facefeatures = face_four
    else:
        facefeatures = []  # no face found by any of the cascades
return facefeatures
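# A minimal usage sketch for find_faces (a sketch only; the input path is
# hypothetical and not part of this commit):
#
#   frame = cv2.imread('some_face_image.jpg')  # hypothetical BGR input
#   for normalized_face, (x, y, w, h) in find_faces(frame):
#       # normalized_face is a 250x250 grayscale crop; (x, y, w, h) is its
#       # bounding box in the original frame
#       cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)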
"""
This module contains functions used to manipulate images in OpenCV and PIL's Image.
"""
import cv2
import numpy as np
from PIL import Image
def image_as_nparray(image):
"""
Converts PIL's Image to numpy's array.
:param image: PIL's Image object.
:return: Numpy's array of the image.
"""
return np.asarray(image)
def nparray_as_image(nparray, mode='RGB'):
"""
Converts numpy's array of image to PIL's Image.
:param nparray: Numpy's array of image.
:param mode: Mode of the conversion. Defaults to 'RGB'.
:return: PIL's Image containing the image.
"""
return Image.fromarray(np.asarray(np.clip(nparray, 0, 255), dtype='uint8'), mode)
def load_image(source_path):
"""
Loads RGB image and converts it to grayscale.
:param source_path: Image's source path.
:return: Image loaded from the path and converted to grayscale.
"""
source_image = cv2.imread(source_path)
return cv2.cvtColor(source_image, cv2.COLOR_BGR2GRAY)
def draw_with_alpha(source_image, image_to_draw, coordinates):
"""
Draws a partially transparent image over another image.
:param source_image: Image to draw over.
:param image_to_draw: Image to draw.
:param coordinates: Coordinates to draw an image at. Tuple of x, y, width and height.
"""
x, y, w, h = coordinates
    image_to_draw = image_to_draw.resize((w, h), Image.ANTIALIAS)  # PIL's resize expects (width, height)
image_array = image_as_nparray(image_to_draw)
for c in range(0, 3):
source_image[y:y + h, x:x + w, c] = image_array[:, :, c] * (image_array[:, :, 3] / 255.0) \
+ source_image[y:y + h, x:x + w, c] * (1.0 - image_array[:, :, 3] / 255.0)
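# A minimal usage sketch for draw_with_alpha (a sketch only; paths are
# hypothetical). The overlay must be a 4-channel RGBA image, e.g. a PNG opened
# with PIL, because the blending reads channel 3 as the alpha mask:
#
#   from PIL import Image
#   overlay = Image.open('Basis_data/graphics/happy.png')  # RGBA emoticon
#   frame = cv2.imread('some_webcam_frame.jpg')            # hypothetical BGR frame
#   draw_with_alpha(frame, overlay, (10, 10, 64, 64))      # x, y, w, h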
import glob
from shutil import copyfile
emotions = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"] # Define emotion order
participants = glob.glob("Basis_data\\source_emotion\\*") # Returns a list of all folders with participant numbers
for x in participants:
part = "%s" % x[-4:] # store current participant number
    for sessions in glob.glob("%s\\*" % x):  # Store list of sessions for current participant
        for files in glob.glob("%s\\*" % sessions):
current_session = files[20:-30]
file = open(files, 'r')
emotion = int(
float(file.readline())) # emotions are encoded as a float, readline as float, then convert to integer.
sourcefile_emotion = glob.glob("Basis_data\\source_images\\%s\\%s\\*" % (part, current_session))[
-1] # get path for last image in sequence, which contains the emotion
sourcefile_neutral = glob.glob("Basis_data\\source_images\\%s\\%s\\*" % (part, current_session))[
0] # do same for neutral image
dest_neut = "Basis_data\\sorted_set\\neutral\\%s" % sourcefile_neutral[25:] # Generate path to put neutral image
dest_emot = "Basis_data\\sorted_set\\%s\\%s" % (
emotions[emotion], sourcefile_emotion[25:]) # Do same for emotion containing image
copyfile(sourcefile_neutral, dest_neut) # Copy file
copyfile(sourcefile_emotion, dest_emot) # Copy file
import cv2
import glob
import random
import numpy as np
emotions = ["neutral", "anger", "disgust", "happy", "surprise"] # Emotion list
fishface = cv2.face.FisherFaceRecognizer_create()
data = {}
def get_files(emotion): # Define function to get file list, randomly shuffle it and split 80/20
files = glob.glob("Basis_data\\dataset\\%s\\*" % emotion)
random.shuffle(files)
training = files[:int(len(files) * 0.8)] # get first 80% of file list
prediction = files[-int(len(files) * 0.2):] # get last 20% of file list
return training, prediction
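# For example (hypothetical count): with 100 shuffled files for an emotion,
# training receives files[:80] and prediction receives files[-20:].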
def make_sets():
training_data = []
training_labels = []
prediction_data = []
prediction_labels = []
for emotion in emotions:
training, prediction = get_files(emotion)
        # Append data to training and prediction lists, and generate labels 0-4
for item in training:
image = cv2.imread(item) # open image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # convert to grayscale
training_data.append(gray) # append image array to training data list
training_labels.append(emotions.index(emotion))
for item in prediction: # repeat above process for prediction set
image = cv2.imread(item)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
prediction_data.append(gray)
prediction_labels.append(emotions.index(emotion))
return training_data, training_labels, prediction_data, prediction_labels
def run_recognizer():
training_data, training_labels, prediction_data, prediction_labels = make_sets()
print("training fisher face classifier")
print("size of training set is:", len(training_labels), "images")
fishface.train(training_data, np.asarray(training_labels))
print("predicting classification set")
cnt = 0
correct = 0
incorrect = 0
for image in prediction_data:
pred, conf = fishface.predict(image)
if pred == prediction_labels[cnt]:
correct += 1
cnt += 1
else:
incorrect += 1
cnt += 1
return ((100 * correct) / (correct + incorrect))
# Now run it
metascore = []
for i in range(0, 10):
correct = run_recognizer()
print("got", correct, "percent correct!")
metascore.append(correct)
print("\n\nend score:", np.mean(metascore), "percent correct!")
fishface.write('Basis_data/models/detection_model.xml')
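# A brief reloading sketch (assumption: the same emotion ordering as above;
# the webcam module loads the saved model the same way):
#
#   fishface = cv2.face.FisherFaceRecognizer_create()
#   fishface.read('Basis_data/models/detection_model.xml')
#   label, confidence = fishface.predict(gray_250x250_face)  # hypothetical input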
import cv2
import glob
faceDet = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_default.xml')
faceDet_two = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_alt2.xml')
faceDet_three = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_alt.xml')
faceDet_four = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_alt_tree.xml')
emotions = ["neutral", "anger", "disgust", "happy", "surprise"] # Define emotions
def detect_faces(emotion):
files = glob.glob('Basis_data/sorted_set/%s/*' % emotion) # Get list of all images with emotion
filenumber = 0
for f in files:
frame = cv2.imread(f) # Open image
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert image to grayscale
# Detect face using 4 different classifiers
face = faceDet.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10, minSize=(5, 5),
flags=cv2.CASCADE_SCALE_IMAGE)
face_two = faceDet_two.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10, minSize=(5, 5),
flags=cv2.CASCADE_SCALE_IMAGE)
face_three = faceDet_three.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10, minSize=(5, 5),
flags=cv2.CASCADE_SCALE_IMAGE)
face_four = faceDet_four.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10, minSize=(5, 5),
flags=cv2.CASCADE_SCALE_IMAGE)
# Go over detected faces, stop at first detected face, return empty if no face.
if len(face) == 1:
facefeatures = face
elif len(face_two) == 1:
facefeatures = face_two
elif len(face_three) == 1:
facefeatures = face_three
elif len(face_four) == 1:
facefeatures = face_four
        else:
            facefeatures = []  # no face found by any of the cascades
# Cut and save face
for (x, y, w, h) in facefeatures: # get coordinates and size of rectangle containing face
print("face found in file: %s" % f)
gray = gray[y:y + h, x:x + w] # Cut the frame to size
try:
out = cv2.resize(gray, (250, 250)) # Resize face so all images have same size
cv2.imwrite('Basis_data/dataset/%s/%s.jpg' % (emotion, filenumber), out) # Write image
except:
pass # If error, pass file
filenumber += 1 # Increment image number
for emotion in emotions:
    detect_faces(emotion)  # Call the function for each emotion
"""
This module is the main module in this package. It loads the emotion recognition model from a file,
shows the webcam image, recognizes faces and their emotions, and draws an emoticon over each face.
"""
from cv2 import WINDOW_NORMAL
import cv2
from face_detect import find_faces
from image_commons import nparray_as_image, draw_with_alpha
def _load_emoticons(emotions):
"""
    Loads emotion images from the graphics folder.
    :param emotions: Array of emotion names.
    :return: Array of emotion graphics.
"""
return [nparray_as_image(cv2.imread('Basis_data/graphics/%s.png' % emotion, -1), mode=None) for emotion in emotions]
def show_webcam_and_run(model, emoticons, window_size=None, window_name='webcam', update_time=10):
"""
    Shows the webcam image, detects faces and their emotions in real time, and draws emoticons over those faces.
:param model: Learnt emotion detection model.
:param emoticons: List of emotions images.
:param window_size: Size of webcam image window.
:param window_name: Name of webcam image window.
:param update_time: Image update time interval.
"""
cv2.namedWindow(window_name, WINDOW_NORMAL)
if window_size:
width, height = window_size
cv2.resizeWindow(window_name, width, height)
vc = cv2.VideoCapture(0)
if vc.isOpened():
read_value, webcam_image = vc.read()
else:
print("webcam not found")
return
while read_value:
for normalized_face, (x, y, w, h) in find_faces(webcam_image):
prediction = model.predict(normalized_face) # do prediction
image_to_draw = emoticons[(prediction[0])]
draw_with_alpha(webcam_image, image_to_draw, (x-150, y-50, w-150, h-150))
cv2.imshow(window_name, webcam_image)
read_value, webcam_image = vc.read()
key = cv2.waitKey(update_time)
if key == 27: # exit on ESC
break
cv2.destroyWindow(window_name)
if __name__ == '__main__':
emotions = ['neutral', 'anger', 'disgust', 'happy', 'surprise']
emoticons = _load_emoticons(emotions)
    # load model
fisher_face = cv2.face.FisherFaceRecognizer_create()
fisher_face.read("Basis_data\\models\\detection_model.xml")
# use learnt model
window_name = 'WEBCAM (press ESC to exit)'
show_webcam_and_run(fisher_face, emoticons, window_size=(800, 800), window_name=window_name, update_time=10)
cycler==0.10.0
Django==1.11.6
dlib==19.7.0
emoji==0.4.5
image==1.5.16
imagesize==0.7.1
imutils==0.4.3
Keras==2.0.8
jupyter==1.0.0
jupyter-client==5.1.0
jupyter-console==5.2.0
jupyter-core==4.3.0
matplotlib==2.1.0
numpy==1.13.3
olefile==0.44
opencv-contrib-python==3.3.0.10
opencv-python==3.3.0.10
Pillow==4.3.0
pyparsing==2.2.0
python-dateutil==2.6.1
pytz==2017.2
PyYAML==3.12
pyzmq==16.0.21
requests==2.18.4
scipy==1.0.0
six==1.11.0
utils==0.9.0
Sphinx==1.6.5
sphinxcontrib-websupport==1.0.1
urllib3==1.22