Commit 43df6bec authored by tihmels's avatar tihmels

N bissl Aufgeräumt

parent d9043415
import cv2


def createwindow(name: str, view, x=70, y=70, width=700, height=700):
    """Show *view* in a movable, resizable OpenCV window.

    Creates (or reuses) a named window, resizes it to *width* x *height*,
    moves it to screen position (*x*, *y*) and draws *view* into it.

    :param name: window title, also the OpenCV window handle
    :param view: image (numpy array) to display
    :param x: left screen coordinate of the window
    :param y: top screen coordinate of the window
    :param width: window width in pixels
    :param height: window height in pixels
    :return: the result of ``cv2.imshow`` (None)
    """
    # WINDOW_NORMAL makes the window user-resizable (AUTOSIZE would not be).
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(name, width, height)
    cv2.moveWindow(name, x, y)
    return cv2.imshow(name, view)
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
import sys

import cv2
import dlib
import numpy as np

import cvhelper
def main():
    """Run a webcam loop that detects faces and draws their 68 dlib landmarks.

    Faces are found with an OpenCV Haar cascade; for each face the dlib
    68-point shape predictor is evaluated and every landmark is annotated
    (index label + circle) on a contrast-equalized grayscale frame, which is
    shown in a window. Press 'q' to quit.
    """
    # Create the Haar cascade face detector.
    detector = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')

    # Create the landmark predictor.
    predictor = dlib.shape_predictor("resources/shape_predictor_68_face_landmarks.dat")

    # Capture from the default camera (device 0).
    cap = cv2.VideoCapture(0)

    while True:
        # Read the next frame from the video stream.
        ret, img = cap.read()
        if not ret:
            # Camera disconnected or stream ended — stop cleanly.
            break

        # Convert the frame to gray scale.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Contrast Limited Adaptive Histogram Equalization improves
        # detection robustness under uneven lighting.
        clahe = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8, 8))
        clahe_image = clahe.apply(gray)

        # Detect faces; minSize filters out spurious small detections.
        # NOTE(review): the diff truncated the first argument — assumed to be
        # the equalized image, since the predictor also runs on it. Confirm.
        faces = detector.detectMultiScale(clahe_image, minSize=(100, 100))

        for (x, y, w, h) in faces:
            # Convert the OpenCV rectangle coordinates to a dlib rectangle
            # so it can serve as the landmark predictor's bounding box.
            dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))

            # Extract the landmark coordinates into a matrix OpenCV can use.
            detected_landmarks = predictor(clahe_image, dlib_rect).parts()
            landmarks = np.matrix([[p.x, p.y] for p in detected_landmarks])

            # Enumerate through the landmark coordinates and mark them.
            for idx, point in enumerate(landmarks):
                pos = (point[0, 0], point[0, 1])
                # Annotate the position with its landmark index.
                # NOTE(review): font face/scale were lost in the diff —
                # FONT_HERSHEY_SIMPLEX at 0.4 is a reconstruction; confirm.
                cv2.putText(clahe_image, str(idx), pos,
                            cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                            color=(0, 0, 255))
                # Draw a point on the landmark position.
                cv2.circle(clahe_image, pos, 3, color=(0, 255, 255))

        # Draw the annotated image in an OpenCV window.
        cvhelper.createwindow('Mood', clahe_image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture and close all windows.
    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
\ No newline at end of file
...@@ -8,8 +8,6 @@ setup( ...@@ -8,8 +8,6 @@ setup(
version='0.1', version='0.1',
packages=['projectmood'], packages=['projectmood'],
license='Carl-von-Ossietzky Universitaet', license='Carl-von-Ossietzky Universitaet',
long_description=long_description, long_description=long_description,
platforms='any', platforms='any',
description='Christina Tsiroglou, Arne Gerdes, Tjado Ihmels', description='Christina Tsiroglou, Arne Gerdes, Tjado Ihmels',
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment