Commit ae8279ce authored by tihmels's avatar tihmels

Performance tweaks applied

parent ccb507c9
@@ -20,6 +20,7 @@ Icon
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
+basis_data/*
# Directories potentially created on remote AFP share
.AppleDB
......
import cv2
import cvhelper
import numpy as np
from matplotlib import pyplot as plt
original = cv2.imread('resources/experiments/trump.jpg')
cvhelper.createwindow("Original", original, 0, 0)
gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
hsv = cv2.cvtColor(original, cv2.COLOR_BGR2HSV)
ret, thresh1 = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
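# THRESH_BINARY sets every gray pixel above the 127 threshold to 255 and the
# rest to 0; ret echoes the threshold value that was applied.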
cvhelper.createwindow("Gray", gray, 100)
cvhelper.createwindow("HSV", hsv, 200)
cvhelper.createwindow("Threshold", thresh1, 300)
plt.hist(gray.ravel(), 256, [0,256])
plt.show()
cv2.waitKey(0)  # wait for any key press before closing the windows
cv2.destroyAllWindows()
\ No newline at end of file
import cv2
import sys
import dlib
import numpy as np
import cvhelper
def main():
    # Create the haar cascade
    detector = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')
    # Create the landmark predictor
    predictor = dlib.shape_predictor("resources/shape_predictor_68_face_landmarks.dat")
    cap = cv2.VideoCapture(0)

    while True:
        # Read a frame from the video stream
        ret, img = cap.read()
        # Convert the frame to gray scale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Contrast Limited Adaptive Histogram Equalization
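        # CLAHE equalizes contrast per tile instead of globally: clipLimit caps
        # how much any histogram bin may be amplified, and tileGridSize=(8, 8)
        # splits the frame into 64 regions that are equalized independently.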
        clahe = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8, 8))
        clahe_image = clahe.apply(gray)

        # Detect faces in the video
        faces = detector.detectMultiScale(
            clahe_image,
            scaleFactor=1.05,
            minNeighbors=5,
            minSize=(100, 100),
        )
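        # scaleFactor=1.05 shrinks the search window by 5% per pyramid step
        # (slower but more thorough), minNeighbors=5 requires five overlapping
        # candidate boxes before a detection counts, and minSize skips faces
        # smaller than 100x100 pixels.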
        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            cv2.rectangle(clahe_image, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # Converting the OpenCV rectangle coordinates to Dlib rectangle
            dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
            # use that rectangle as the bounding box to detect the face landmarks,
            # and extract out the coordinates of the landmarks so OpenCV can use them
            detected_landmarks = predictor(clahe_image, dlib_rect).parts()
            landmarks = np.matrix([[p.x, p.y] for p in detected_landmarks])
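            # The 68-point predictor returns one part per landmark; stacking
            # them in an np.matrix lets the loop below read each row as
            # point[0, 0] / point[0, 1].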
            # enumerate through the landmark coordinates and mark them on the image
            for idx, point in enumerate(landmarks):
                pos = (point[0, 0], point[0, 1])
                # annotate the positions
                cv2.putText(clahe_image, str(idx), pos,
                            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=0.4,
                            color=(0, 0, 255))
                # draw points on the landmark positions
                cv2.circle(clahe_image, pos, 3, color=(0, 255, 255))

        # draw the annotated image on an OpenCV window
        cvhelper.createwindow('Mood', clahe_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
\ No newline at end of file
import cv2
-import cvhelper
import numpy as np
from matplotlib import pyplot as plt
+from archive import cvhelper
original = cv2.imread('resources/experiments/trump.jpg')
cvhelper.createwindow("Original", original, 0, 0)
......
import cv2
import sys
import dlib
import numpy as np
-import cvhelper
+from archive import cvhelper
def main():
    # Create the haar cascade
......
import cv2
def createwindow(name, view, x=70, y=70, w=600, h=600):
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(name, w, h)
    cv2.moveWindow(name, x, y)
    cv2.imshow(name, view)
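# A hypothetical usage sketch (image path assumed from the experiments above):
#   img = cv2.imread('resources/experiments/trump.jpg')
#   createwindow('Preview', img, x=50, y=50)
#   cv2.waitKey(0)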
\ No newline at end of file
@@ -3,10 +3,10 @@ This module contains face detection functions.
"""
import cv2
-faceDet = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_default.xml')
-faceDet_two = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_alt2.xml')
-faceDet_three = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_alt.xml')
-faceDet_four = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_alt_tree.xml')
+faceDet = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_default.xml')
+faceDet_two = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt2.xml')
+faceDet_three = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt.xml')
+faceDet_four = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt_tree.xml')
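# Four cascade variants are loaded so that find_faces() can presumably fall
# back from one to the next; the detect_faces() script further down documents
# the same pattern: stop at the first cascade that returns a hit.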
def find_faces(image):
......
@@ -2,25 +2,25 @@ import glob
from shutil import copyfile
emotions = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"]  # Define emotion order
-participants = glob.glob("Basis_data\\source_emotion\\*")  # Returns a list of all folders with participant numbers
+participants = glob.glob("basis_data/source_emotion/*")  # Returns a list of all folders with participant numbers
for x in participants:
    part = "%s" % x[-4:]  # store current participant number
-    for sessions in glob.glob("Basis_data\\%s\\*" % x):  # Store list of sessions for current participant
-        for files in glob.glob("Basis_data\\%s\\*" % sessions):
+    for sessions in glob.glob("basis_data/%s/*" % x):  # Store list of sessions for current participant
+        for files in glob.glob("basis_data/%s/*" % sessions):
            current_session = files[20:-30]
            file = open(files, 'r')
            emotion = int(float(file.readline()))  # emotions are encoded as a float: read the line, then convert to integer
-            sourcefile_emotion = glob.glob("Basis_data\\source_images\\%s\\%s\\*" % (part, current_session))[-1]  # get path for last image in sequence, which contains the emotion
+            sourcefile_emotion = glob.glob("basis_data/source_images/%s/%s/*" % (part, current_session))[-1]  # get path for last image in sequence, which contains the emotion
-            sourcefile_neutral = glob.glob("Basis_data\\source_images\\%s\\%s\\*" % (part, current_session))[0]  # do same for neutral image
+            sourcefile_neutral = glob.glob("basis_data/source_images/%s/%s/*" % (part, current_session))[0]  # do same for neutral image
-            dest_neut = "Basis_data\\sorted_set\\neutral\\%s" % sourcefile_neutral[25:]  # Generate path to put neutral image
-            dest_emot = "Basis_data\\sorted_set\\%s\\%s" % (emotions[emotion], sourcefile_emotion[25:])  # Do same for emotion containing image
+            dest_neut = "basis_data/sorted_set/neutral/%s" % sourcefile_neutral[25:]  # Generate path to put neutral image
+            dest_emot = "basis_data/sorted_set/%s/%s" % (emotions[emotion], sourcefile_emotion[25:])  # Do same for emotion containing image
            copyfile(sourcefile_neutral, dest_neut)  # Copy file
......
@@ -14,7 +14,7 @@ emotions = ["anger", "disgust", "happy", "neutral", "surprise"]  # Emotion list
fishface = cv2.face.FisherFaceRecognizer_create()
def get_files(emotion):  # Define function to get file list, randomly shuffle it and split 80/20
-    files = glob.glob('Basis_data/dataset/%s/*' % emotion)
+    files = glob.glob('basis_data/dataset/%s/*' % emotion)
    random.shuffle(files)
    training = files[:int(len(files) * 0.8)]  # get first 80% of file list
    prediction = files[-int(len(files) * 0.2):]  # get last 20% of file list
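    # Shuffling before the split keeps the 80/20 train/test partition random,
    # so repeated runs train and evaluate on different subsets.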
@@ -84,4 +84,4 @@ for idx, meta in enumerate(metascore):
logging.debug("fisherface finished with {}%\n".format(np.mean(metascore)))
-fishface.write('Basis_data/models/detection_model.xml')
\ No newline at end of file
+fishface.write('basis_data/models/detection_model.xml')
\ No newline at end of file
@@ -2,10 +2,10 @@ import cv2
import glob
import logging
-faceDet = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_default.xml')
-faceDet_two = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_alt2.xml')
-faceDet_three = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_alt.xml')
-faceDet_four = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_alt_tree.xml')
+faceDet = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_default.xml')
+faceDet_two = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt2.xml')
+faceDet_three = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt.xml')
+faceDet_four = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt_tree.xml')
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M',
@@ -20,7 +20,7 @@ undetected: list = []
def detect_faces(emotion):
-    files = glob.glob('Basis_data/sorted_set/%s/*' % emotion)  # Get list of all images with emotion
+    files = glob.glob('basis_data/sorted_set/%s/*' % emotion)  # Get list of all images with emotion
    global undetected
    global totalFaces
@@ -33,13 +33,13 @@ def detect_faces(emotion):
        frame = cv2.imread(f)  # Open image
        # Detect face using 4 different classifiers
-        face = faceDet.detectMultiScale(frame, scaleFactor=1.05, minNeighbors=6, minSize=(10, 10), flags=cv2.CASCADE_SCALE_IMAGE)
-        face_two = faceDet_two.detectMultiScale(frame, scaleFactor=1.05, minNeighbors=6, minSize=(10, 10), flags=cv2.CASCADE_SCALE_IMAGE)
-        face_three = faceDet_three.detectMultiScale(frame, scaleFactor=1.05, minNeighbors=6, minSize=(10, 10), flags=cv2.CASCADE_SCALE_IMAGE)
-        face_four = faceDet_four.detectMultiScale(frame, scaleFactor=1.05, minNeighbors=6, minSize=(10, 10), flags=cv2.CASCADE_SCALE_IMAGE)
+        face = faceDet.detectMultiScale(frame, scaleFactor=1.05, minNeighbors=10, minSize=(10, 10), flags=cv2.CASCADE_SCALE_IMAGE)
+        face_two = faceDet_two.detectMultiScale(frame, scaleFactor=1.05, minNeighbors=10, minSize=(10, 10), flags=cv2.CASCADE_SCALE_IMAGE)
+        face_three = faceDet_three.detectMultiScale(frame, scaleFactor=1.05, minNeighbors=10, minSize=(10, 10), flags=cv2.CASCADE_SCALE_IMAGE)
+        face_four = faceDet_four.detectMultiScale(frame, scaleFactor=1.05, minNeighbors=10, minSize=(10, 10), flags=cv2.CASCADE_SCALE_IMAGE)
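        # Raising minNeighbors from 6 to 10 demands more overlapping candidate
        # boxes per detection, which presumably trades a little recall for
        # fewer false positives in the sorted dataset.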
        # Go over detected faces, stop at first detected face, return empty if no face.
@@ -63,19 +63,20 @@ def detect_faces(emotion):
        try:
            out = cv2.resize(frame, (250, 250))  # Resize face so all images have same size
-            cv2.imwrite('Basis_data/dataset/%s/%s.jpg' % (emotion, fileNumber), out)  # Write image
+            cv2.imwrite('basis_data/dataset/%s/%s.jpg' % (emotion, fileNumber), out)  # Write image
        except:
            pass  # If error, pass file
        totalFiles += 1  # Increment image number
        fileNumber += 1
logging.debug("end of set\n")
for emotion in emotions:
detect_faces(emotion) # Call functional
logging.debug("\n")
logging.debug("{} Gesichter in {} Dateien gefunden.".format(totalFaces, totalFiles))
logging.debug("Kein Gesicht wurde gefunden in:")
logging.debug("In {} Dateien wurde kein Gesicht gefunden:".format(totalFiles - totalFaces))
for f in undetected:
logging.debug(f)
@@ -19,7 +19,7 @@ def _load_emoticons(emotions):
    :param emotions: Array of emotion names.
    :return: Array of emotion graphics.
    """
-    return [nparray_as_image(cv2.imread('Basis_data/graphics/%s.png' % emotion, -1), mode=None) for emotion in emotions]
+    return [nparray_as_image(cv2.imread('resources/graphics/%s.png' % emotion, -1), mode=None) for emotion in emotions]
def show_webcam_and_run(model, emoticons, window_size=None, window_name='webcam', update_time=10):
@@ -69,7 +69,7 @@ if __name__ == '__main__':
    # load model
    fisher_face = cv2.face.FisherFaceRecognizer_create()
-    fisher_face.read('Basis_data/models/detection_model.xml')
+    fisher_face.read('basis_data/models/detection_model.xml')
    # use learnt model
    window_name = 'WEBCAM (press ESC to exit)'
    show_webcam_and_run(fisher_face, emoticons, window_size=(800, 800), window_name=window_name, update_time=1)