Commit 8f47806c authored by tihmels's avatar tihmels

Preprocessed image is projected into the frame

parent f4ae12d9
...@@ -8,7 +8,7 @@ class WebcamVideoStream: ...@@ -8,7 +8,7 @@ class WebcamVideoStream:
# Initialize webcam stream and read the first frame from the stream # Initialize webcam stream and read the first frame from the stream
self.stream = cv2.VideoCapture(src) self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read() (self.ret, self.frame) = self.stream.read()
# Variable indicating whether the thread should be stopped # Variable indicating whether the thread should be stopped
self.stopped = False self.stopped = False
...@@ -29,7 +29,7 @@ class WebcamVideoStream: ...@@ -29,7 +29,7 @@ class WebcamVideoStream:
return return
# Otherwise, the next frame is read from the stream # Otherwise, the next frame is read from the stream
(self.grabbed, self.frame) = self.stream.read() self.ret, self.frame = self.stream.read()
def read(self): def read(self):
...@@ -40,3 +40,7 @@ class WebcamVideoStream: ...@@ -40,3 +40,7 @@ class WebcamVideoStream:
# indicates that the thread should be stopped # indicates that the thread should be stopped
self.stopped = True self.stopped = True
def size(self):
    """Return the (width, height) of the video stream in pixels."""
    width = self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)
    return int(width), int(height)
...@@ -27,7 +27,7 @@ def preprocess_face(face, resize): ...@@ -27,7 +27,7 @@ def preprocess_face(face, resize):
return face return face
def locate_faces(image, scaleFactor=1.2, minNeighbors=6, minSize=(100, 100)): def locate_faces(image, scaleFactor=1.1, minNeighbors=6, minSize=(350, 350)):
face = faceDet_one.detectMultiScale(image, scaleFactor, minNeighbors, minSize=minSize, face = faceDet_one.detectMultiScale(image, scaleFactor, minNeighbors, minSize=minSize,
flags=cv2.CASCADE_SCALE_IMAGE) flags=cv2.CASCADE_SCALE_IMAGE)
......
...@@ -49,3 +49,16 @@ def draw_with_alpha(source_image, image_to_draw, coordinates): ...@@ -49,3 +49,16 @@ def draw_with_alpha(source_image, image_to_draw, coordinates):
for c in range(0, 3): for c in range(0, 3):
source_image[y:y + h, x:x + w, c] = image_array[:, :, c] * (image_array[:, :, 3] / 255.0) \ source_image[y:y + h, x:x + w, c] = image_array[:, :, c] * (image_array[:, :, 3] / 255.0) \
+ source_image[y:y + h, x:x + w, c] * (1.0 - image_array[:, :, 3] / 255.0) + source_image[y:y + h, x:x + w, c] * (1.0 - image_array[:, :, 3] / 255.0)
def draw_img(source_image, image_to_draw, coordinates):
    """
    Draws an opaque image over a region of another image.

    Unlike draw_with_alpha, no alpha blending is performed: the resized
    image simply overwrites the target region, with its single (grayscale)
    plane copied into each of the three colour channels.

    :param source_image: Target image (numpy array, H x W x 3) to paint over.
    :param image_to_draw: PIL image to draw.
    :param coordinates: Tuple of (x, y, width, height) for the target region.
    """
    x, y, w, h = coordinates
    # PIL's Image.resize expects (width, height); the original (h, w) order
    # only worked because the caller passes a square region (250 x 250).
    image_to_draw = image_to_draw.resize((w, h), Image.ANTIALIAS)
    image_array = image_as_nparray(image_to_draw)
    for c in range(0, 3):
        source_image[y:y + h, x:x + w, c] = image_array[:, :]
\ No newline at end of file
...@@ -4,10 +4,11 @@ import os ...@@ -4,10 +4,11 @@ import os
import cv2 import cv2
import cv2.face import cv2.face
import numpy as np import numpy as np
from RingBuffer import RingBuffer from RingBuffer import RingBuffer
from WebcamVideoStream import WebcamVideoStream from WebcamVideoStream import WebcamVideoStream
from face_detect import extract_faces from face_detect import extract_faces
from image_commons import nparray_as_image, draw_with_alpha from image_commons import nparray_as_image, draw_with_alpha, draw_img
parser = argparse.ArgumentParser(description='ProjectMood Emotion Detection') parser = argparse.ArgumentParser(description='ProjectMood Emotion Detection')
parser.add_argument('-b', '--buffer', action='store', dest='buffer', default=12, type=int, help='size of ringbuffer') parser.add_argument('-b', '--buffer', action='store', dest='buffer', default=12, type=int, help='size of ringbuffer')
...@@ -52,14 +53,20 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=pa ...@@ -52,14 +53,20 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name=pa
while True: while True:
for normalized_face in extract_faces(frame, resizefactor): for normalized_face in extract_faces(frame, resizefactor):
prediction = model.predict(normalized_face) # do prediction # prediction = model.predict(normalized_face) # do prediction
# Save the Predictions # Save the Predictions
buffer.append(prediction[0]) # buffer.append(prediction[0])
# Get the entries as an array # Get the entries as an array
predictions = buffer.get() predictions = buffer.get()
# Read the processed input image
processed_image = nparray_as_image(normalized_face[:, :], mode='L')
w, h = vc.size()
# And print it to the frame
draw_img(frame, processed_image, (w-300, h-300, 250, 250))
# No entry in the ring buffer is None # No entry in the ring buffer is None
if not (any(x is None for x in predictions)): if not (any(x is None for x in predictions)):
# Counting occurrences of predictions # Counting occurrences of predictions
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment