Commit 23de87a0 authored by tihmels

Tidied up the repo, moved everything at least once, on principle -

Added an email service so that log files can be sent during batch processing.
Also an ArgumentParser, so far only implemented in sorted_set_facedetector, to launch the program with parameters from the console. Biaaaatchesssss <3
parent 05fba73b
......@@ -20,10 +20,10 @@ Icon
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
basis_data/dataset/*
basis_data/sorted_set/*
basis_data/source_emotion/*
basis_data/source_images/*
projectmood/img_data/dataset/*
projectmood/img_data/sorted_set/*
projectmood/img_data/source_emotion/*
projectmood/img_data/source_images/*
# Directories potentially created on remote AFP share
.AppleDB
......
# Import required modules
import cv2
import dlib
# Set up some required objects
video_capture = cv2.VideoCapture(0)  # Webcam object
detector = dlib.get_frontal_face_detector()  # Dlib face detector
haar_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')  # Haar cascade face detector
predictor = dlib.shape_predictor(
    "shape_predictor_68_face_landmarks.dat")  # Landmark identifier. Set the filename to whatever you named the downloaded file
while True:
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    clahe_image = clahe.apply(gray)
    faces = haar_detector.detectMultiScale(gray, 1.3, 5)
    detections = detector(clahe_image, 1)  # Detect the faces in the image
    for k, d in enumerate(detections):  # For each face detected by dlib
        shape = predictor(clahe_image, d)  # Get landmark coordinates
        for i in range(0, 68):  # There are 68 landmark points on each face, indexed 0-67
            cv2.circle(frame, (shape.part(i).x, shape.part(i).y), 1, (0, 0, 255),
                       thickness=2)  # For each point, draw a red circle with thickness 2 on the original frame
    for (x, y, w, h) in faces:  # For each face detected by the haar cascade
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
    cv2.imshow("image", frame)  # Display the frame
    if cv2.waitKey(1) & 0xFF == ord('q'):  # Exit the program when the user presses 'q'
        break
video_capture.release()
cv2.destroyAllWindows()
import cv2
def createwindow(name, view, x=70, y=70, w=150, h=150):
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(name, w, h)
    cv2.moveWindow(name, x, y)
    cv2.imshow(name, view)
\ No newline at end of file
import cv2
import numpy as np
from matplotlib import pyplot as plt
from archive import cvhelper
original = cv2.imread('resources/experiments/trump.jpg')
cvhelper.createwindow("Original", original, 0, 0)
gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
hsv = cv2.cvtColor(original, cv2.COLOR_BGR2HSV)
ret, thresh1 = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
cvhelper.createwindow("Gray", gray, 100)
cvhelper.createwindow("HSV", hsv, 200)
cvhelper.createwindow("Threshold", thresh1, 300)
plt.hist(gray.ravel(), 256, [0,256])
plt.show()
cv2.waitKey(0)  # Wait for any key press
cv2.destroyAllWindows()
\ No newline at end of file
import cv2
import dlib
import numpy as np
from archive import cvhelper
def main():
    # Create the haar cascade
    detector = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')
    # Create the landmark predictor
    predictor = dlib.shape_predictor("resources/shape_predictor_68_face_landmarks.dat")
    cap = cv2.VideoCapture(0)
    while True:
        # Read a frame from the video stream
        ret, img = cap.read()
        # Convert the frame to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Contrast Limited Adaptive Histogram Equalization
        clahe = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8, 8))
        clahe_image = clahe.apply(gray)
        # Detect faces in the frame
        faces = detector.detectMultiScale(
            clahe_image,
            scaleFactor=1.05,
            minNeighbors=5,
            minSize=(100, 100),
        )
        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            cv2.rectangle(clahe_image, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # Convert the OpenCV rectangle coordinates to a dlib rectangle
            dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
            # Use that rectangle as the bounding box to detect the face landmarks,
            # and extract the coordinates of the landmarks so OpenCV can use them
            detected_landmarks = predictor(clahe_image, dlib_rect).parts()
            landmarks = np.matrix([[p.x, p.y] for p in detected_landmarks])
            # Enumerate through the landmark coordinates and mark them on the image
            for idx, point in enumerate(landmarks):
                pos = (point[0, 0], point[0, 1])
                # Annotate the positions
                cv2.putText(clahe_image, str(idx), pos,
                            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=0.4,
                            color=(0, 0, 255))
                # Draw points on the landmark positions
                cv2.circle(clahe_image, pos, 3, color=(0, 255, 255))
        # Draw the annotated image in an OpenCV window
        cvhelper.createwindow('Mood', clahe_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    main()
\ No newline at end of file
# The Emotion Face detection Scripts
# You can modify this script as you wish
import cv2
import glob as gb
import random
import numpy as np
# Emotion list
emojis = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"]
# Initialize the fisher face classifier (cv2.face requires the opencv-contrib package)
fisher_face = cv2.face.FisherFaceRecognizer_create()
data = {}
# Function definition to get the file list, randomly shuffle it and split it 67/33
def getFiles(emotion):
    files = gb.glob("final_dataset\\%s\\*" % emotion)
    random.shuffle(files)
    training = files[:int(len(files) * 0.67)]  # get first 67% of file list
    prediction = files[-int(len(files) * 0.33):]  # get last 33% of file list
    # note: int(n * 0.67) + int(n * 0.33) can be smaller than n, so a single file may end up in neither set
    return training, prediction
def makeTrainingAndValidationSet():
    training_data = []
    training_labels = []
    prediction_data = []
    prediction_labels = []
    for emotion in emojis:
        training, prediction = getFiles(emotion)
        # Append data to training and prediction lists, and generate labels 0-7
        for item in training:
            image = cv2.imread(item)  # open image
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to grayscale
            training_data.append(gray)  # append image array to training data list
            training_labels.append(emojis.index(emotion))
        for item in prediction:  # repeat the process above for the prediction set
            image = cv2.imread(item)
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            prediction_data.append(gray)
            prediction_labels.append(emojis.index(emotion))
    return training_data, training_labels, prediction_data, prediction_labels
def runClassifier():
    training_data, training_labels, prediction_data, prediction_labels = makeTrainingAndValidationSet()
    print("training fisher face classifier using the training data")
    print("size of training set is:", len(training_labels), "images")
    fisher_face.train(training_data, np.asarray(training_labels))
    print("classification prediction")
    counter = 0
    right = 0
    wrong = 0
    for image in prediction_data:
        pred, conf = fisher_face.predict(image)
        if pred == prediction_labels[counter]:
            right += 1
        else:
            wrong += 1
        counter += 1
    return (100 * right) / (right + wrong)
# Now run the classifier
metascore = []
for i in range(0, 10):
    right = runClassifier()
    print("got", right, "percent right!")
    metascore.append(right)
print("\n\nend score:", np.mean(metascore), "percent right!")
\ No newline at end of file
import cv2
import glob as gb
face_detector1 = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
face_detector2 = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
face_detector3 = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
face_detector4 = cv2.CascadeClassifier("haarcascade_frontalface_alt_tree.xml")
emotion_list = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"]
def faceDetection(emotion):
    files = gb.glob("selected_set\\%s\\*" % emotion)  # Get list of all images with this emotion
    filenumber = 0
    for f in files:
        frame = cv2.imread(f)  # Open image
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Convert image to grayscale
        # Detect the face using 4 different classifiers
        face1 = face_detector1.detectMultiScale(gray, scaleFactor=1.1,
                                                minNeighbors=10, minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
        face2 = face_detector2.detectMultiScale(gray, scaleFactor=1.1,
                                                minNeighbors=10, minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
        face3 = face_detector3.detectMultiScale(gray, scaleFactor=1.1,
                                                minNeighbors=10, minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
        face4 = face_detector4.detectMultiScale(gray, scaleFactor=1.1,
                                                minNeighbors=10, minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
        # Go over the detections, stop at the first classifier that found exactly one face, return empty if none did
        if len(face1) == 1:
            facefeatures = face1
        elif len(face2) == 1:
            facefeatures = face2
        elif len(face3) == 1:
            facefeatures = face3
        elif len(face4) == 1:
            facefeatures = face4
        else:
            facefeatures = []
        # Cut out and save the face
        for (x, y, w, h) in facefeatures:  # get coordinates and size of the rectangle containing the face
            print("face found in file: %s" % f)
            gray = gray[y:y + h, x:x + w]  # Cut the frame to size
            try:
                out = cv2.resize(gray, (350, 350))  # Resize the face so all images have the same size
                cv2.imwrite("final_dataset\\%s\\%s.jpg" % (emotion, filenumber), out)  # Write image
            except cv2.error:
                pass  # skip the file on error
            filenumber += 1  # Increment image number
if __name__ == '__main__':
    for emotion in emotion_list:
        faceDetection(emotion)  # Call our face detection function
import glob as gb
from shutil import copyfile
emotions_list = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"]
emotions_folders = gb.glob("emotions\\*")  # Returns a list of all folders with participant numbers
def imageWithEmotionExtraction():
    for x in emotions_folders:
        participant = "%s" % x[-4:]  # store current participant number
        for sessions in gb.glob("%s\\*" % x):
            for files in gb.glob("%s\\*" % sessions):
                current_session = files[20:-30]
                file = open(files, 'r')
                emotion = int(float(file.readline()))
                file.close()
                # get the path of the last image in the sequence, which contains the emotion
                sourcefile_emotion = gb.glob("images\\%s\\%s\\*" % (participant, current_session))[-1]
                # do the same for the neutral image
                sourcefile_neutral = gb.glob("images\\%s\\%s\\*" % (participant, current_session))[0]
                # Generate the destination path for the neutral image
                dest_neut = "selected_set\\neutral\\%s" % sourcefile_neutral[25:]
                # Do the same for the image containing the emotion
                dest_emot = "selected_set\\%s\\%s" % (emotions_list[emotion], sourcefile_emotion[25:])
                copyfile(sourcefile_neutral, dest_neut)  # Copy file
                copyfile(sourcefile_emotion, dest_emot)  # Copy file
if __name__ == '__main__':
    imageWithEmotionExtraction()
\ No newline at end of file
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import os
def sendMail(subject, to='tjado.ihmels@uni-oldenburg.de', body='', filepath=''):
    fromaddr = "projectmood18@gmail.com"
    msg = MIMEMultipart()
    msg['From'] = fromaddr
    msg['To'] = to
    msg['Subject'] = subject
    msg.attach(MIMEText(body, 'plain'))
    if filepath:
        filename = os.path.basename(filepath)
        part = MIMEBase('application', 'octet-stream')
        with open(filepath, "rb") as attachment:
            part.set_payload(attachment.read())
        encoders.encode_base64(part)
        part.add_header('Content-Disposition', "attachment; filename=%s" % filename)
        msg.attach(part)
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.starttls()
    server.login(fromaddr, "kR2-zgY-BTf-gRT")
    text = msg.as_string()
    server.sendmail(fromaddr, to, text)
    server.quit()
sendMail('logs/sorted_set_facedetector.log', body='TESTX')
\ No newline at end of file
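
The email service above can also attach a log file to the message. A hypothetical call for a batch run (subject, body and log path are illustrative values, not part of this commit):

sendMail('sorted_set_facedetector finished',
         body='Batch processing done, log file attached.',
         filepath='logs/sorted_set_facedetector.log')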
"""
This module contains face detection functions.
"""
import cv2
faceDet_one = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_default.xml')
faceDet_two = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt2.xml')
faceDet_three = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt.xml')
faceDet_four = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt_tree.xml')
faceDet_one = cv2.CascadeClassifier('resources/haarcascade/haarcascade_frontalface_default.xml')
faceDet_two = cv2.CascadeClassifier('resources/haarcascade/haarcascade_frontalface_alt2.xml')
faceDet_three = cv2.CascadeClassifier('resources/haarcascade/haarcascade_frontalface_alt.xml')
faceDet_four = cv2.CascadeClassifier('resources/haarcascade/haarcascade_frontalface_alt_tree.xml')
def extract_faces(image):
    faces = find_faces(image)
    normalized_faces = [preprocess_face(face) for face in faces]
    return normalized_faces
def find_faces(image):
    faces_coordinates = locate_faces(image)
    cutted_faces = [image[y:y + h, x:x + w] for (x, y, w, h) in faces_coordinates]
    normalized_faces = [normalize_face(face) for face in cutted_faces]
    return zip(normalized_faces, faces_coordinates)
    return cutted_faces
def normalize_face(face):
def preprocess_face(face):
    face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
    face = cv2.resize(face, (250, 250))
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    face = clahe.apply(face)
    return face
def locate_faces(image, scaleFactor=1.4, minNeighbors=3, minSize=(20, 20)):
    minx, miny = minSize
def locate_faces(image, scaleFactor=1.2, minNeighbors=6, minSize=(100, 100)):
    face = faceDet_one.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(minx, miny),
    face = faceDet_one.detectMultiScale(image, scaleFactor, minNeighbors, minSize=minSize,
                                        flags=cv2.CASCADE_SCALE_IMAGE)
    if len(face) == 1:
        return face
    face = faceDet_two.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(minx, miny),
    face = faceDet_two.detectMultiScale(image, scaleFactor, minNeighbors, minSize=minSize,
                                        flags=cv2.CASCADE_SCALE_IMAGE)
    if len(face) == 1:
        return face
    face = faceDet_three.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(minx, miny),
    face = faceDet_three.detectMultiScale(image, scaleFactor, minNeighbors, minSize=minSize,
                                          flags=cv2.CASCADE_SCALE_IMAGE)
    if len(face) == 1:
        return face
    face = faceDet_four.detectMultiScale(image, scaleFactor, minNeighbors, minSize=(minx, miny),
    face = faceDet_four.detectMultiScale(image, scaleFactor, minNeighbors, minSize=minSize,
                                         flags=cv2.CASCADE_SCALE_IMAGE)
    if len(face) == 1:
......
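
The module above tries four haar cascades in turn and only accepts a result when exactly one face is found. A hypothetical usage sketch of the new extract_faces entry point ('some_image.jpg' is an illustrative path, not part of the repo):

import cv2
from face_detect import extract_faces
image = cv2.imread('some_image.jpg')
for face in extract_faces(image):
    # each face comes back grayscale, 250x250 and CLAHE-equalized
    cv2.imshow('normalized face', face)
    cv2.waitKey(0)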
......@@ -14,7 +14,7 @@ emotions = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness
"""
List of the file folders of all participants
"""
participants = glob.glob("basis_data/source_emotion/*")
participants = glob.glob("img_data/source_emotion/*")
for x in participants:
......@@ -39,7 +39,7 @@ for x in participants:
"""
emotion = int(float(file.readline()))
source_emotions = glob.glob("basis_data/source_images/%s/%s/*.png" % (number, current_session))
source_emotions = glob.glob("img_data/source_images/%s/%s/*.png" % (number, current_session))
source_emotions.sort()
"""
......@@ -58,8 +58,8 @@ for x in participants:
For the neutral expression
For the emotion
"""
dest_neut = "basis_data/sorted_set/neutral/%s" % sourcefile_neutral[36:]
dest_emot = "basis_data/sorted_set/%s/%s" % (emotions[emotion], sourcefile_emotion[36:])
dest_neut = "img_data/sorted_set/neutral/%s" % sourcefile_neutral[36:]
dest_emot = "img_data/sorted_set/%s/%s" % (emotions[emotion], sourcefile_emotion[36:])
"""Kopiert Dateien"""
copyfile(sourcefile_neutral, dest_neut)
......
......@@ -31,7 +31,7 @@ def _get_faces_from_emotion(emotion):
:param emotion: The emotion
:return: training, prediction
"""
files = glob.glob('basis_data/dataset/{}/*'.format(emotion))
files = glob.glob('img_data/dataset/{}/*'.format(emotion))
random.shuffle(files)
"""
......@@ -51,8 +51,9 @@ def image_preprocessing(image):
"""
img = cv2.imread(image) # open image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert to grayscale
blur = cv2.GaussianBlur(gray, (5, 5), 0)
return blur
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
face = clahe.apply(gray)
return face
def make_sets():
......@@ -108,7 +109,7 @@ fishface = cv2.face.FisherFaceRecognizer_create()
metascore = []
for i in range(0, 10):
for i in range(0, 40):
correct = run_recognizer()
file.write("{}\n".format(int(correct)))
logging.debug("{} : {}%".format(i, int(correct)))
......@@ -117,4 +118,4 @@ for i in range(0, 10):
file.close()
logging.debug("{} iterations - {}% average\n".format(len(metascore), np.mean(metascore)))
fishface.write('basis_data/models/detection_model.xml')
fishface.write('img_data/models/detection_model.xml')
import cv2
import glob
import logging
import argparse
import shutil
import sys
import os
from face_detect import locate_faces
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
......@@ -9,60 +15,91 @@ logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)-12s %(level
filemode='w')
"""
Emotions to be processed
The argument parser allows processing parameters to be passed on the command line.
"""
emotions = ["happy", "neutral", "surprise"]
parser = argparse.ArgumentParser(description='Sorted Set Face Creator Application')
parser.add_argument('-s', action='store', dest='img_source', default='resources/img_data/sorted_set/',
                    help='Path to the image files')
parser.add_argument('-b', action='store', dest='dataset', default='resources/img_data/dataset/', help='Path to the dataset')
# note: with action='append' the default emotions are always kept; -e only adds further ones
parser.add_argument('-e', action='append', dest='emotions', default=['happy', 'neutral', 'surprise'],
                    help='Emotions to be processed')
parser.add_argument('-r', action='store', dest='resize', default=150, type=int, help='Resize factor')
parser.add_argument('-f', action='store', dest='scaleFactor', default=1.1, type=float,
                    help='Scale factor for the haar cascade')
parser.add_argument('-n', action='store', dest='minNeighbors', default=6, type=int, help='minNeighbors for the haar cascade')
parser.add_argument('-m', action='store', dest='minSize', default=40, type=int, help='minSize for the haar cascade')
arguments = parser.parse_args()
logging.debug(arguments)
datasetPath = arguments.dataset
if len(glob.glob(datasetPath + '*')) > 0:
    deleteDataset = input(
        'The dataset already contains files. This process will delete the existing data. Continue (y/n): ')
    if deleteDataset == 'y':
        for file in glob.glob(datasetPath + '*'):
            shutil.rmtree(file)
    else:
        sys.exit()
totalFiles: int = 0
totalFaces: int = 0
undetected: list = []
img_source = arguments.img_source
def detect_faces(emotion):
    """
    Fetches all files for one emotion from the sorted_set
    """
    files = glob.glob('basis_data/sorted_set/{}/*'.format(emotion))
    files = glob.glob(img_source + '{}/*'.format(emotion))
    global undetected
    global totalFaces
    global totalFiles
    logging.debug("{} files in folder {}".format(len(files), emotion))
    logging.debug("found {} {} files".format(len(files), emotion))
    fileNumber = 0
    for f in files:
        frame = cv2.imread(f)  # Open image
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Convert image to grayscale
        facefeatures = locate_faces(gray, 1.1, 10, (40, 40))
        facefeatures = locate_faces(gray, arguments.scaleFactor, arguments.minNeighbors, (arguments.minSize, arguments.minSize))
        if len(facefeatures) == 0:
            undetected.append(f)
        else:
            # Cut out and save the face
            for (x, y, w, h) in facefeatures:  # get coordinates and size of the rectangle containing the face
                logging.debug("face found in file: {}".format(f))
                totalFaces += 1
                gray = gray[y:y + h, x:x + w]  # Cut the frame to size
                try:
                    out = cv2.resize(gray, (250, 250))  # Resize face so all images have the same size
                    cv2.imwrite('basis_data/dataset/{}/{}.jpg'.format(emotion, fileNumber), out)  # Write image
                    out = cv2.resize(gray, (arguments.resize, arguments.resize))  # Resize face so all images have the same size
                    success = cv2.imwrite(datasetPath + '{}/{}.jpg'.format(emotion, fileNumber), out)  # Write image
                    if not success:
                        logging.error('A problem occurred while writing the file')
                        sys.exit()
                except cv2.error:
                    pass  # If an error occurs, skip the file
        totalFiles += 1  # Increment image number
        fileNumber += 1
    logging.debug("end of set\n")
    logging.debug("\n")
for emotion in arguments.emotions:
    if not os.path.exists(datasetPath + emotion):
        os.makedirs(datasetPath + emotion)
for emotion in emotions:
    detect_faces(emotion)  # Call the face detection function
logging.debug("{} faces found in {} files.".format(totalFaces, totalFiles))
logging.debug("No face was found in {} files:".format(totalFiles - totalFaces))
logging.debug('{} faces in {} files found'.format(totalFaces, totalFiles))
logging.debug('in {} files no face could be detected'.format(totalFiles - totalFaces))
for f in undetected:
    logging.debug(f)
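
With the ArgumentParser in place, a batch run can be configured entirely from the console. A hypothetical invocation (the script file name is assumed; the flags are the ones defined above, and note that -e appends to the default emotions rather than replacing them):

python sorted_set_facedetector.py -b resources/img_data/dataset/ -e anger -r 250 -f 1.2 -n 10 -m 40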
......@@ -7,21 +7,21 @@ import cv2
from RingBuffer import RingBuffer
from WebcamVideoStream import WebcamVideoStream
from face_detect import find_faces
from face_detect import extract_faces
from image_commons import nparray_as_image, draw_with_alpha
import numpy as np
def _load_emoticons(emotions):
    """
    Loads the emoticons from the graphics folder.
    Loads the emoticons from the emojis folder.
    :param emotions: Array of emotions.
    :return: Array of emotion graphics.
    """
    return [nparray_as_image(cv2.imread('resources/graphics/%s.png' % emotion, -1), mode=None) for emotion in emotions]
    return [nparray_as_image(cv2.imread('resources/emojis/%s.png' % emotion, -1), mode=None) for emotion in emotions]
def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name='Mood Expression', update_time=1):
def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name='Project Mood', update_time=1):
"""
Zeigt ein Webcam-Bild, erkennt Gesichter und Emotionen in Echtzeit und zeichnet Emoticons neben die Gesichter.
:param model: Trainiertes Model
......@@ -31,40 +31,40 @@ def show_webcam_and_run(model, emoticons, window_size=(600, 600), window_name='M
    :param update_time: Image update interval.
    """
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)
    width, height = window_size
    cv2.resizeWindow(window_name, width, height)
    vc = WebcamVideoStream().start()
    frame = vc.read()
    """
    The RingBuffer stores the last 7 predictions
    """
    puffer = RingBuffer(7)
    buffer = RingBuffer(7)
    while True:
        for normalized_face, (x, y, w, h) in find_faces(frame):
    frame = vc.read()
    while True:
        for normalized_face in extract_faces(frame):
            prediction = model.predict(normalized_face)  # do prediction
            """
            Stores the predictions
            """
            puffer.append(prediction[0])
            buffer.append(prediction[0])
            """
            Fetches the entries as an array
            """
            preds = puffer.get()
            predictions = buffer.get()
            """
            No entry in the RingBuffer is None
            """