Commit ae8279ce authored by tihmels's avatar tihmels
Browse files

Performance Tweaks vorgenommen

parent ccb507c9
......@@ -20,6 +20,7 @@ Icon
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
basis_data/*
# Directories potentially created on remote AFP share
.AppleDB
......
import cv2
import sys
import dlib
import numpy as np
import cvhelper
def main():
    """Run a webcam loop that detects faces and draws 68-point landmarks.

    Reads frames from the default camera, equalizes contrast with CLAHE,
    detects faces with a Haar cascade, locates dlib's 68 facial landmarks
    inside each face, and shows the annotated frame. Press 'q' to quit.
    """
    # Haar cascade used for coarse face detection.
    detector = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')
    # Dlib 68-point facial landmark predictor.
    predictor = dlib.shape_predictor("resources/shape_predictor_68_face_landmarks.dat")
    cap = cv2.VideoCapture(0)
    # PERF: build the CLAHE operator once, outside the capture loop — the
    # original recreated it for every single frame.
    clahe = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8, 8))
    while True:
        # Read one frame from the camera.
        ret, img = cap.read()
        if not ret:
            # Camera read failed (device gone / stream ended): stop cleanly
            # instead of crashing in cvtColor on a None frame.
            break
        # Detection runs on contrast-equalized grayscale.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        clahe_image = clahe.apply(gray)
        # Detect faces in the equalized frame.
        faces = detector.detectMultiScale(
            clahe_image,
            scaleFactor=1.05,
            minNeighbors=5,
            minSize=(100, 100),
        )
        for (x, y, w, h) in faces:
            # Draw the face bounding box.
            cv2.rectangle(clahe_image, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # Convert the OpenCV (x, y, w, h) rectangle to a dlib rectangle
            # so it can serve as the landmark predictor's bounding box.
            dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
            detected_landmarks = predictor(clahe_image, dlib_rect).parts()
            landmarks = np.matrix([[p.x, p.y] for p in detected_landmarks])
            # Annotate every landmark with its index and a small circle.
            for idx, point in enumerate(landmarks):
                pos = (point[0, 0], point[0, 1])
                cv2.putText(clahe_image, str(idx), pos,
                            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=0.4,
                            color=(0, 0, 255))
                cv2.circle(clahe_image, pos, 3, color=(0, 255, 255))
        # Show the annotated frame in an OpenCV window.
        cvhelper.createwindow('Mood', clahe_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
# Script entry point: run the webcam landmark demo when executed directly.
if __name__ == '__main__':
    main()
\ No newline at end of file
import cv2
import cvhelper
import numpy as np
from matplotlib import pyplot as plt
from archive import cvhelper
original = cv2.imread('resources/experiments/trump.jpg')
cvhelper.createwindow("Original", original, 0, 0)
......
import cv2
import sys
import dlib
import numpy as np
import cvhelper
from archive import cvhelper
def main():
# Create the haar cascade
......
import cv2
def createwindow(name, view, x=70, y=70, w=600, h=600):
    """Show *view* in a resizable OpenCV window.

    Creates (or reuses) the named window, places it at (x, y), sizes it
    to w×h pixels, and displays the image in it.
    """
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.moveWindow(name, x, y)
    cv2.resizeWindow(name, w, h)
    cv2.imshow(name, view)
\ No newline at end of file
import cv2
import cvhelper
import numpy as np
from matplotlib import pyplot as plt
# Load the test image and show the unmodified original.
original = cv2.imread('resources/experiments/trump.jpg')
cvhelper.createwindow("Original", original, 0, 0)

# Derived views: grayscale, HSV, and a fixed binary threshold at 127.
gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
hsv = cv2.cvtColor(original, cv2.COLOR_BGR2HSV)
ret, thresh1 = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
cvhelper.createwindow("Gray", gray, 100)
cvhelper.createwindow("HSV", hsv, 200)
cvhelper.createwindow("Threshold", thresh1, 300)

# Grayscale intensity histogram.
plt.hist(gray.ravel(), 256, [0, 256])
plt.show()

# BUG FIX: the original called cv2.waitKey(0) twice — `k = cv2.waitKey(0)`
# then `if k == cv2.waitKey(0) & 0xFF:` — blocking for two key presses and
# comparing the first key against the masked second one, so the windows
# were only destroyed if the same key happened to match. Wait once for any
# key, then close all windows.
cv2.waitKey(0)
cv2.destroyAllWindows()
\ No newline at end of file
......@@ -3,10 +3,10 @@ This module contains face detections functions.
"""
import cv2
# Haar cascade classifiers for frontal-face detection; find_faces tries
# them in order (default, alt2, alt, alt_tree).
# NOTE(review): the next four assignments are immediately overwritten by the
# 'resources/'-prefixed versions below — this looks like leftover from a
# path-prefix change, so the first four lines are dead and can likely be
# removed. TODO confirm which path layout the deployment actually uses.
faceDet = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_default.xml')
faceDet_two = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_alt2.xml')
faceDet_three = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_alt.xml')
faceDet_four = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_alt_tree.xml')
# Effective classifiers: loaded from the resources/ directory.
faceDet = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_default.xml')
faceDet_two = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt2.xml')
faceDet_three = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt.xml')
faceDet_four = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt_tree.xml')
def find_faces(image):
......
......@@ -2,25 +2,25 @@ import glob
from shutil import copyfile
emotions = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"] # Define emotion order
participants = glob.glob("Basis_data\\source_emotion\\*") # Returns a list of all folders with participant numbers
participants = glob.glob("basis_data/source_emotion/*") # Returns a list of all folders with participant numbers
for x in participants:
part = "%s" % x[-4:] # store current participant number
for sessions in glob.glob("Basis_data\\%s\\*" % x): # Store list of sessions for current participant
for files in glob.glob("Basis_data\\%s\\*" % sessions):
for sessions in glob.glob("basis_data/%s/*" % x): # Store list of sessions for current participant
for files in glob.glob("basis_data/%s/*" % sessions):
current_session = files[20:-30]
file = open(files, 'r')
emotion = int(
float(file.readline())) # emotions are encoded as a float, readline as float, then convert to integer.
sourcefile_emotion = glob.glob("Basis_data\\source_images\\%s\\%s\\*" % (part, current_session))[
sourcefile_emotion = glob.glob("basis_data/source_images/%s/%s/*" % (part, current_session))[
-1] # get path for last image in sequence, which contains the emotion
sourcefile_neutral = glob.glob("Basis_data\\source_images\\%s\\%s\\*" % (part, current_session))[
sourcefile_neutral = glob.glob("basis_data/source_images/%s/%s/*" % (part, current_session))[
0] # do same for neutral image
dest_neut = "Basis_data\\sorted_set\\neutral\\%s" % sourcefile_neutral[25:] # Generate path to put neutral image
dest_emot = "Basis_data\\sorted_set\\%s\\%s" % (
dest_neut = "basis_data/sorted_set/neutral/%s" % sourcefile_neutral[25:] # Generate path to put neutral image
dest_emot = "basis_data/sorted_set/%s/%s" % (
emotions[emotion], sourcefile_emotion[25:]) # Do same for emotion containing image
copyfile(sourcefile_neutral, dest_neut) # Copy file
......
......@@ -14,7 +14,7 @@ emotions = ["anger", "disgust", "happy", "neutral", "surprise"] # Emotion list
fishface = cv2.face.FisherFaceRecognizer_create()
def get_files(emotion): # Define function to get file list, randomly shuffle it and split 80/20
files = glob.glob('Basis_data/dataset/%s/*' % emotion)
files = glob.glob('basis_data/dataset/%s/*' % emotion)
random.shuffle(files)
training = files[:int(len(files) * 0.8)] # get first 80% of file list
prediction = files[-int(len(files) * 0.2):] # get last 20% of file list
......@@ -84,4 +84,4 @@ for idx, meta in enumerate(metascore):
logging.debug("fisherface finished with {}%\n".format(np.mean(metascore)))
fishface.write('Basis_data/models/detection_model.xml')
\ No newline at end of file
fishface.write('basis_data/models/detection_model.xml')
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment