import cv2
import sys
import dlib
import numpy as np

import cvhelper

def main():
    # Create the Haar cascade face detector
    detector = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')

    # Create the landmark predictor
    predictor = dlib.shape_predictor("resources/shape_predictor_68_face_landmarks.dat")

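    # Open the default webcam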
    cap = cv2.VideoCapture(0)

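    # Process frames until the user presses 'q'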
    while True:

        # Read a frame from the webcam; stop if no frame could be read
        ret, img = cap.read()
        if not ret:
            break

        # Convert the frame to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Contrast Limited Adaptive Histogram Equalization
        clahe = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8, 8))
        clahe_image = clahe.apply(gray)

        # Detect faces in the frame
        faces = detector.detectMultiScale(
            clahe_image,
            scaleFactor=1.05,    # how much the image is scaled down between detection passes
            minNeighbors=5,      # how many neighbouring detections are needed to keep a face
            minSize=(100, 100),  # ignore detections smaller than 100x100 pixels
        )

        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            cv2.rectangle(clahe_image, (x, y), (x + w, y + h), (255, 0, 0), 2)

            # Convert the OpenCV rectangle coordinates to a dlib rectangle
            dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))

            # Use that rectangle as the bounding box to detect the face landmarks,
            # and extract the landmark coordinates so OpenCV can use them
            detected_landmarks = predictor(clahe_image, dlib_rect).parts()
            landmarks = np.matrix([[p.x, p.y] for p in detected_landmarks])

            # enumerate through the landmark coordinates and mark them on the image
            for idx, point in enumerate(landmarks):
                pos = (point[0, 0], point[0, 1])

                # annotate the positions
                cv2.putText(clahe_image, str(idx), pos,
                            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=0.4,
                            color=(0, 0, 255))

                # draw points on the landmark positions
                cv2.circle(clahe_image, pos, 3, color=(0, 255, 255))

        # Display the annotated image in an OpenCV window
        cvhelper.createwindow('Mood', clahe_image)

        # Exit the loop when 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

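    # Release the camera and close all OpenCV windows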
    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()