diff --git a/projectmood/face_detect.py b/projectmood/face_detect.py
index a54283b0c7c4fd1acba83b4b2f3c20cc6d01fe39..226b36600e00ee7ecb3201ee73b08037fd8d8f22 100644
--- a/projectmood/face_detect.py
+++ b/projectmood/face_detect.py
@@ -3,10 +3,10 @@ This module contains face detections functions.
 """
 import cv2
 
-faceDet = cv2.CascadeClassifier("Haarcascade\\haarcascade_frontalface_default.xml")
-faceDet_two = cv2.CascadeClassifier("Haarcascade\\haarcascade_frontalface_alt2.xml")
-faceDet_three = cv2.CascadeClassifier("Haarcascade\\haarcascade_frontalface_alt.xml")
-faceDet_four = cv2.CascadeClassifier("Haarcascade\\haarcascade_frontalface_alt_tree.xml")
+faceDet = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_default.xml')
+faceDet_two = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_alt2.xml')
+faceDet_three = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_alt.xml')
+faceDet_four = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_alt_tree.xml')
 
 
 def find_faces(image):
@@ -24,23 +24,28 @@ def _normalize_face(face):
 
 
 def _locate_faces(image):
-    face_one = faceDet.detectMultiScale(image, scaleFactor=1.1, minNeighbors=15, minSize=(70, 70),
+    face_one = faceDet.detectMultiScale(image, scaleFactor=2, minNeighbors=4, minSize=(50, 50),
                                         flags=cv2.CASCADE_SCALE_IMAGE)
-    face_two = faceDet_two.detectMultiScale(image, scaleFactor=1.1, minNeighbors=15, minSize=(70, 70),
+
+    if len(face_one) == 1:
+        return face_one
+
+    face_two = faceDet_two.detectMultiScale(image, scaleFactor=2, minNeighbors=4, minSize=(50, 50),
                                             flags=cv2.CASCADE_SCALE_IMAGE)
-    face_three = faceDet_three.detectMultiScale(image, scaleFactor=1.1, minNeighbors=15, minSize=(70, 70),
+
+    if len(face_two) == 1:
+        return face_two
+
+    face_three = faceDet_three.detectMultiScale(image, scaleFactor=2, minNeighbors=4, minSize=(50, 50),
                                                 flags=cv2.CASCADE_SCALE_IMAGE)
-    face_four = faceDet_four.detectMultiScale(image, scaleFactor=1.1, minNeighbors=15, minSize=(70, 70),
+
+    if len(face_three) == 1:
+        return face_three
+
+    face_four = faceDet_four.detectMultiScale(image, scaleFactor=2, minNeighbors=4, minSize=(50, 50),
                                               flags=cv2.CASCADE_SCALE_IMAGE)
-    if len(face_one) == 1:
-        facefeatures = face_one
-    elif len(face_two) == 1:
-        facefeatures = face_two
-    elif len(face_three) == 1:
-        facefeatures = face_three
-    elif len(face_four) == 1:
-        facefeatures = face_four
-    else:
-        facefeatures = ""
-
-    return facefeatures
+
+    if len(face_four) == 1:
+        return face_four
+
+    return []
diff --git a/projectmood/webcam.py b/projectmood/webcam.py
index 2a5114ab11cc31bf7169c7af24489839e72121b4..dcef20e9fd993854ef6a9ab9bf6df864e42b88db 100644
--- a/projectmood/webcam.py
+++ b/projectmood/webcam.py
@@ -40,15 +40,17 @@ def show_webcam_and_run(model, emoticons, window_size=None, window_name='webcam'
 
     if vc.isOpened():
         read_value, webcam_image = vc.read()
     else:
-        print("webcam not found")
+        print("Webcam not found")
         return
 
     while read_value:
         for normalized_face, (x, y, w, h) in find_faces(webcam_image):
             prediction = model.predict(normalized_face)  # do prediction
+            cv2.imshow("Face", normalized_face)
             image_to_draw = emoticons[(prediction[0])]
-            draw_with_alpha(webcam_image, image_to_draw, (x-150, y-50, w-150, h-150))
+            if x - 150 > 0 and y - 50 > 0 and w - 150 > 0 and h - 150 > 0:
+                draw_with_alpha(webcam_image, image_to_draw, (x-150, y-50, w-150, h-150))
 
         cv2.imshow(window_name, webcam_image)
         read_value, webcam_image = vc.read()
@@ -67,7 +69,7 @@ if __name__ == '__main__':
     # load mode
     fisher_face = cv2.face.FisherFaceRecognizer_create()
-    fisher_face.read("Basis_data\\models\\detection_model.xml")
+    fisher_face.read('Basis_data/models/detection_model.xml')
 
     # use learnt model
     window_name = 'WEBCAM (press ESC to exit)'
-show_webcam_and_run(fisher_face, emoticons, window_size=(800, 800), window_name=window_name, update_time=10)
+show_webcam_and_run(fisher_face, emoticons, window_size=(800, 800), window_name=window_name, update_time=1)
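A quick way to sanity-check the retuned detectMultiScale parameters (scaleFactor 1.1 -> 2, minNeighbors 15 -> 4, minSize (70, 70) -> (50, 50)) and the new early-return fallback is to run _locate_faces on a still image before trying the webcam loop. A minimal sketch, assuming a hypothetical test image sample.jpg and a working directory in which the relative Haarcascade/*.xml paths resolve; neither is part of this diff:

    # Sanity check for the retuned cascade parameters in face_detect.py.
    # 'sample.jpg' is a hypothetical test image; run this from the directory
    # where the relative Haarcascade/*.xml paths resolve.
    import cv2

    from face_detect import _locate_faces

    image = cv2.imread('sample.jpg')
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # Haar cascades work on grayscale

    for (x, y, w, h) in _locate_faces(gray):
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)  # mark each hit

    cv2.imshow('detections', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

Raising scaleFactor to 2 makes the image pyramid much coarser, so each cascade runs faster but can miss faces that fall between scales; the early returns compensate by letting the first cascade that finds exactly one face short-circuit the remaining three in the common single-face case.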