Commit 43df6bec authored by tihmels's avatar tihmels

N bissl Aufgeräumt

parent d9043415
import cv2 import cv2
def createwindow(name: str, view, x=70, y=70, width=700, height=700):
    """Display *view* in a freely resizable OpenCV window.

    Creates (or reuses) a WINDOW_NORMAL window called *name*, sizes it to
    ``width`` x ``height``, places its top-left corner at ``(x, y)`` and
    draws the image into it.
    """
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(name, width, height)
    cv2.moveWindow(name, x, y)
    return cv2.imshow(name, view)
This diff is collapsed.
This diff is collapsed.
import cv2 import cv2
import sys import sys
import dlib import dlib
import numpy as np
from projectmood import cvhelper import cvhelper
def main():
    """Run live face detection with 68-point landmark annotation.

    Reads frames from the webcam (or from a video file given as the first
    command-line argument), detects faces with an OpenCV Haar cascade, fits
    dlib's 68-point shape predictor to each face and shows the annotated
    grayscale frame until 'q' is pressed.
    """
    # Haar cascade face detector and dlib's pre-trained landmark predictor.
    detector = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')
    predictor = dlib.shape_predictor("resources/shape_predictor_68_face_landmarks.dat")

    # Optional CLI argument: a video file path; default is the webcam.
    if len(sys.argv) > 1:
        cap = cv2.VideoCapture(str(sys.argv[1]))
    else:
        cap = cv2.VideoCapture(0)

    while True:
        # Read the next frame; ret is False at end-of-stream or on camera
        # failure — bail out instead of crashing in cvtColor on None.
        ret, img = cap.read()
        if not ret:
            break

        # Convert to grayscale for detection.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Contrast Limited Adaptive Histogram Equalization — evens out
        # lighting so detection works better in dim/uneven scenes.
        clahe = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8, 8))
        clahe_image = clahe.apply(gray)

        # Detect faces in the equalized frame.
        faces = detector.detectMultiScale(
            clahe_image,
            scaleFactor=1.05,
            minNeighbors=5,
            minSize=(100, 100),
        )

        for (x, y, w, h) in faces:
            # Draw a rectangle around the face.
            cv2.rectangle(clahe_image, (x, y), (x + w, y + h), (255, 0, 0), 2)

            # Convert the OpenCV rectangle into a dlib rectangle so the
            # landmark predictor can run on the same region.
            dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
            detected_landmarks = predictor(clahe_image, dlib_rect).parts()
            landmarks = np.matrix([[p.x, p.y] for p in detected_landmarks])

            # Mark every landmark with its index and a small circle.
            for idx, point in enumerate(landmarks):
                pos = (point[0, 0], point[0, 1])
                cv2.putText(clahe_image, str(idx), pos,
                            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=0.4,
                            color=(0, 0, 255))
                cv2.circle(clahe_image, pos, 3, color=(0, 255, 255))

        # Show the annotated frame; 'q' quits.
        cvhelper.createwindow('Mood', clahe_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
\ No newline at end of file
...@@ -8,8 +8,6 @@ setup( ...@@ -8,8 +8,6 @@ setup(
version='0.1', version='0.1',
packages=['projectmood'], packages=['projectmood'],
license='Carl-von-Ossietzky Universitaet', license='Carl-von-Ossietzky Universitaet',
author='tjadoihmels',
author_email='tjado.ihmels@uni-oldenburg.de',
long_description=long_description, long_description=long_description,
platforms='any', platforms='any',
description='Christina Tsiroglou, Arne Gerdes, Tjado Ihmels', description='Christina Tsiroglou, Arne Gerdes, Tjado Ihmels',
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment