Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
M
medienverarbeitung17.projectmood
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Packages
Packages
Container Registry
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Commits
Open sidebar
Frank Tjado Ihmels
medienverarbeitung17.projectmood
Commits
497b8e88
Commit
497b8e88
authored
Dec 17, 2017
by
tihmels
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
RingBuffer implementiert, Kommentare geschrieben, Emotionen vorerst auf drei eingeschränkt!
parent
46440f50
Changes
6
Hide whitespace changes
Inline
Side-by-side
Showing
6 changed files
with
98 additions
and
66 deletions
+98
-66
projectmood/RingBuffer.py
projectmood/RingBuffer.py
+10
-0
projectmood/face_detect.py
projectmood/face_detect.py
+16
-16
projectmood/image_sort.py
projectmood/image_sort.py
+18
-17
projectmood/process_model.py
projectmood/process_model.py
+24
-9
projectmood/sorted_set_facedetector.py
projectmood/sorted_set_facedetector.py
+15
-14
projectmood/webcam.py
projectmood/webcam.py
+15
-10
No files found.
projectmood/RingBuffer.py
0 → 100644
View file @
497b8e88
class RingBuffer:
    """Fixed-size FIFO buffer holding the most recent `size` values.

    Slots start out as None and are overwritten as values arrive, so a
    consumer can detect a not-yet-full buffer by checking for None entries.
    """

    def __init__(self, size):
        # [None] * size replaces the original comprehension with an unused
        # loop variable; same contents, clearer intent.
        self.data = [None] * size

    def append(self, x):
        """Drop the oldest entry and store `x` as the newest.

        NOTE(review): list.pop(0) is O(n); fine for the small buffers used
        here, but switch to collections.deque(maxlen=size) if sizes grow.
        """
        self.data.pop(0)
        self.data.append(x)

    def get(self):
        """Return the buffered values, oldest first (the internal list itself)."""
        return self.data
\ No newline at end of file
projectmood/face_detect.py
View file @
497b8e88
...
@@ -3,7 +3,7 @@ This module contains face detections functions.
...
@@ -3,7 +3,7 @@ This module contains face detections functions.
"""
"""
import cv2

# Haar cascade face detectors, tried in order by locate_faces() below.
# Paths are relative to the project working directory.
faceDet_one = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_default.xml')
faceDet_two = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt2.xml')
faceDet_three = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt.xml')
faceDet_four = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalface_alt_tree.xml')
...
@@ -12,40 +12,40 @@ faceDet_four = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalf
...
@@ -12,40 +12,40 @@ faceDet_four = cv2.CascadeClassifier('resources/Haarcascade/haarcascade_frontalf
def find_faces(image):
    """Detect all faces in `image`.

    :param image: source image (BGR array as read by cv2)
    :return: iterator of (normalized_face, (x, y, w, h)) pairs
    """
    coords = locate_faces(image)
    normalized = []
    for (x, y, w, h) in coords:
        crop = image[y:y + h, x:x + w]  # cut the face region out of the frame
        normalized.append(normalize_face(crop))
    return zip(normalized, coords)
def normalize_face(face):
    """Bring a face crop into canonical form: grayscale, 250x250 pixels."""
    gray = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
    return cv2.resize(gray, (250, 250))
def locate_faces(image, scaleFactor=1.4, minNeighbors=3):
    """Locate exactly one face in `image`.

    Runs the four Haar cascades in order and returns the first result that
    contains exactly one detection.

    :param image: input image for the cascades (presumably grayscale --
        TODO confirm against callers)
    :param scaleFactor: pyramid scale step passed to detectMultiScale
    :param minNeighbors: detection-merging threshold passed to detectMultiScale
    :return: coordinate array of the single face, or '' when no cascade
        finds exactly one face
    """
    # The four copy-pasted detect/check stanzas of the original are folded
    # into one loop; behavior (order, parameters, early return) is unchanged.
    for detector in (faceDet_one, faceDet_two, faceDet_three, faceDet_four):
        face = detector.detectMultiScale(image, scaleFactor, minNeighbors,
                                         minSize=(40, 40),
                                         flags=cv2.CASCADE_SCALE_IMAGE)
        if len(face) == 1:
            return face
    # '' is kept as the no-face sentinel (not None/[]) because callers
    # compare with `facefeatures is ''`.
    return ''
projectmood/image_sort.py
View file @
497b8e88
import glob
from shutil import copyfile

# Emotion labels -- order matters: the list index is the emotion code stored
# in the annotation files.
emotions = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"]

# One folder per participant of the source-emotion annotations.
participants = glob.glob("basis_data/source_emotion/*")

for x in participants:
    number = "%s" % x[-4:]  # participant number (last four characters of the folder name)
    for sessions in glob.glob("%s/*" % x):
        for files in glob.glob("%s/*" % sessions):
            current_session = files[31:-30]  # session number sliced out of the path
            # The .txt file stores the session's emotion encoded as a float.
            # `with` replaces the original bare open() that was never closed.
            with open(files, 'r') as annotation:
                emotion = int(float(annotation.readline()))
            source_emotions = glob.glob("basis_data/source_images/%s/%s/*.png" % (number, current_session))
            source_emotions.sort()
            sourcefile_emotion = source_emotions[-1]  # last frame of a sequence shows the full emotion
            sourcefile_neutral = source_emotions[0]  # first frame is a neutral expression
            # Build the destination paths for sorting
            dest_neut = "basis_data/sorted_set/neutral/%s" % sourcefile_neutral[36:]  # neutral expression
            dest_emot = "basis_data/sorted_set/%s/%s" % (emotions[emotion], sourcefile_emotion[36:])  # the emotion
            # Copy files
            copyfile(sourcefile_neutral, dest_neut)
            copyfile(sourcefile_emotion, dest_emot)
projectmood/process_model.py
View file @
497b8e88
...
@@ -8,19 +8,32 @@ import logging
...
@@ -8,19 +8,32 @@ import logging
logging
.
basicConfig
(
level
=
logging
.
DEBUG
,
format
=
'
%(asctime)
s
%(levelname)-8
s
%(message)
s'
,
logging
.
basicConfig
(
level
=
logging
.
DEBUG
,
format
=
'
%(asctime)
s
%(levelname)-8
s
%(message)
s'
,
datefmt
=
'
%
m-
%
d
%
H:
%
M'
,
datefmt
=
'
%
m-
%
d
%
H:
%
M'
,
filename
=
'logs/process_model.log'
)
filename
=
'logs/process_model.log'
)
args
=
sys
.
argv
args
=
sys
.
argv
# liest Input Parameter
logging
.
debug
(
'Fisherface training initialized'
)
logging
.
debug
(
'Fisherface training initialized'
)
def
get_files_from_emotion
(
emotion
):
# Define function to get file list, randomly shuffle it and split 80/20
file
=
open
(
"gray_equalized.csv"
,
"w"
)
files
=
glob
.
glob
(
'basis_data/dataset/
%
s/*'
%
emotion
)
def
_get_faces_from_emotion
(
emotion
):
"""
Holt alle Dateien zu einer Emotion aus dem Dataset, mischt sie und teilt sie in ein Trainings- und Prognoseset.
:param emotion: Die Emotion
:return: training, prediction
"""
files
=
glob
.
glob
(
'basis_data/dataset/{}/*'
.
format
(
emotion
))
random
.
shuffle
(
files
)
random
.
shuffle
(
files
)
training
=
files
[:
int
(
len
(
files
)
*
0.8
)]
# get first 80% of file list
training
=
files
[:
int
(
len
(
files
)
*
0.8
)]
# get first 80% of file list
prediction
=
files
[
-
int
(
len
(
files
)
*
0.2
):]
# get last 20% of file list
prediction
=
files
[
-
int
(
len
(
files
)
*
0.2
):]
# get last 20% of file list
return
training
,
prediction
return
training
,
prediction
def image_preprocessing(image):
    """
    Preprocessing for dataset files: load the image from disk and convert it
    to grayscale.

    :param image: path of the image file to load
    :return: the grayscale image
    """
    img = cv2.imread(image)  # open image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    return gray
def
make_sets
():
def
make_sets
():
...
@@ -29,7 +42,7 @@ def make_sets():
...
@@ -29,7 +42,7 @@ def make_sets():
prediction_data
=
[]
prediction_data
=
[]
prediction_labels
=
[]
prediction_labels
=
[]
for
emotion
in
emotions
:
for
emotion
in
emotions
:
training
,
prediction
=
get_fil
es_from_emotion
(
emotion
)
training
,
prediction
=
_get_fac
es_from_emotion
(
emotion
)
# Append data to training and prediction list, and generate labels 0-7
# Append data to training and prediction list, and generate labels 0-7
for
item
in
training
:
for
item
in
training
:
img
=
image_preprocessing
(
item
)
img
=
image_preprocessing
(
item
)
...
@@ -66,16 +79,18 @@ if len(args) > 1:
...
@@ -66,16 +79,18 @@ if len(args) > 1:
tags
=
', '
.
join
(
args
[
1
:])
tags
=
', '
.
join
(
args
[
1
:])
logging
.
debug
(
tags
.
upper
())
logging
.
debug
(
tags
.
upper
())
emotions
=
[
"
anger"
,
"disgust"
,
"
happy"
,
"neutral"
,
"surprise"
]
# Emotion list
emotions
=
[
"happy"
,
"neutral"
,
"surprise"
]
# Emotion list
fishface
=
cv2
.
face
.
FisherFaceRecognizer_create
()
fishface
=
cv2
.
face
.
FisherFaceRecognizer_create
()
# Now run it
# Now run it
metascore
=
[]
metascore
=
[]
for
i
in
range
(
0
,
1
0
):
for
i
in
range
(
0
,
2
0
):
correct
=
run_recognizer
()
correct
=
run_recognizer
()
logging
.
debug
(
"{} : {}
%
"
.
format
(
i
,
int
(
correct
)))
logging
.
debug
(
"{} : {}
%
"
.
format
(
i
,
int
(
correct
)))
file
.
write
(
"{}, {}"
.
format
(
i
,
int
(
correct
)))
metascore
.
append
(
correct
)
metascore
.
append
(
correct
)
file
.
close
()
logging
.
debug
(
"{} iterations - {}
%
average
\n
"
.
format
(
len
(
metascore
),
np
.
mean
(
metascore
)))
logging
.
debug
(
"{} iterations - {}
%
average
\n
"
.
format
(
len
(
metascore
),
np
.
mean
(
metascore
)))
fishface
.
write
(
'basis_data/models/detection_model.xml'
)
fishface
.
write
(
'basis_data/models/detection_model.xml'
)
\ No newline at end of file
projectmood/sorted_set_facedetector.py
View file @
497b8e88
...
@@ -8,7 +8,7 @@ logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)-12s %(level
...
@@ -8,7 +8,7 @@ logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)-12s %(level
filename
=
'logs/sorted_set_facedetector.log'
,
filename
=
'logs/sorted_set_facedetector.log'
,
filemode
=
'w'
)
filemode
=
'w'
)
emotions
=
[
"
neutral"
,
"anger"
,
"disgust"
,
"happy"
,
"surprise"
]
# Define emotions
emotions
=
[
"
happy"
,
"neutral"
,
"surprise"
]
# Emotionen die verarbeitet werden sollen
totalFiles
:
int
=
0
totalFiles
:
int
=
0
totalFaces
:
int
=
0
totalFaces
:
int
=
0
...
@@ -16,7 +16,7 @@ undetected: list = []
...
@@ -16,7 +16,7 @@ undetected: list = []
def
detect_faces
(
emotion
):
def
detect_faces
(
emotion
):
files
=
glob
.
glob
(
'basis_data/sorted_set/
%
s/*'
%
emotion
)
# Get list of all images with emotion
files
=
glob
.
glob
(
'basis_data/sorted_set/
{}/*'
.
format
(
emotion
))
# Holt alle Dateien zu einer Emotion aus dem sorted_set
global
undetected
global
undetected
global
totalFaces
global
totalFaces
...
@@ -27,23 +27,24 @@ def detect_faces(emotion):
...
@@ -27,23 +27,24 @@ def detect_faces(emotion):
fileNumber
=
0
fileNumber
=
0
for
f
in
files
:
for
f
in
files
:
frame
=
cv2
.
imread
(
f
)
# Open image
frame
=
cv2
.
imread
(
f
)
# Open image
gray
=
cv2
.
cvtColor
(
frame
,
cv2
.
COLOR_BGR2GRAY
)
# Convert image to grayscale
facefeatures
=
locate_faces
(
frame
,
1.05
,
6
)
facefeatures
=
locate_faces
(
gray
,
1.05
,
10
)
if
facefeatures
is
''
:
if
facefeatures
is
''
:
undetected
.
append
(
f
)
undetected
.
append
(
f
)
# Cut and save face
else
:
for
(
x
,
y
,
w
,
h
)
in
facefeatures
:
# get coordinates and size of rectangle containing
face
# Cut and save
face
logging
.
debug
(
"face found in file: {}"
.
format
(
f
))
for
(
x
,
y
,
w
,
h
)
in
facefeatures
:
# get coordinates and size of rectangle containing face
totalFaces
+=
1
logging
.
debug
(
"face found in file: {}"
.
format
(
f
))
frame
=
frame
[
y
:
y
+
h
,
x
:
x
+
w
]
# Cut the frame to size
totalFaces
+=
1
gray
=
gray
[
y
:
y
+
h
,
x
:
x
+
w
]
# Cut the frame to size
try
:
try
:
out
=
cv2
.
resize
(
frame
,
(
250
,
250
))
# Resize face so all images have same size
out
=
cv2
.
resize
(
gray
,
(
250
,
250
))
# Resize face so all images have same size
cv2
.
imwrite
(
'basis_data/dataset/
%
s/
%
s.jpg'
%
(
emotion
,
fileNumber
),
out
)
# Write image
cv2
.
imwrite
(
'basis_data/dataset/{}/{}.jpg'
.
format
(
emotion
,
fileNumber
),
out
)
# Write image
except
:
except
:
pass
# If error, pass file
pass
# If error, pass file
totalFiles
+=
1
# Increment image number
totalFiles
+=
1
# Increment image number
fileNumber
+=
1
fileNumber
+=
1
...
...
projectmood/webcam.py
View file @
497b8e88
...
@@ -3,14 +3,13 @@ This module is the main module in this package. It loads emotion recognition mod
...
@@ -3,14 +3,13 @@ This module is the main module in this package. It loads emotion recognition mod
shows a webcam image, recognizes face and it's emotion and draw emotion on the image.
shows a webcam image, recognizes face and it's emotion and draw emotion on the image.
"""
"""
from
cv2
import
WINDOW_NORMAL
import
cv2
import
cv2
from
RingBuffer
import
RingBuffer
from
WebcamVideoStream
import
WebcamVideoStream
from
WebcamVideoStream
import
WebcamVideoStream
from
face_detect
import
find_faces
from
face_detect
import
find_faces
from
image_commons
import
nparray_as_image
,
draw_with_alpha
from
image_commons
import
nparray_as_image
,
draw_with_alpha
import
numpy
as
np
def
_load_emoticons
(
emotions
):
def
_load_emoticons
(
emotions
):
...
@@ -22,7 +21,7 @@ def _load_emoticons(emotions):
...
@@ -22,7 +21,7 @@ def _load_emoticons(emotions):
return
[
nparray_as_image
(
cv2
.
imread
(
'resources/graphics/
%
s.png'
%
emotion
,
-
1
),
mode
=
None
)
for
emotion
in
emotions
]
return
[
nparray_as_image
(
cv2
.
imread
(
'resources/graphics/
%
s.png'
%
emotion
,
-
1
),
mode
=
None
)
for
emotion
in
emotions
]
def
show_webcam_and_run
(
model
,
emoticons
,
window_size
=
(
800
,
8
00
),
window_name
=
'Mood Expression'
,
update_time
=
1
):
def
show_webcam_and_run
(
model
,
emoticons
,
window_size
=
(
600
,
6
00
),
window_name
=
'Mood Expression'
,
update_time
=
1
):
"""
"""
Shows webcam image, detects faces and its emotions in real time and draw emoticons over those faces.
Shows webcam image, detects faces and its emotions in real time and draw emoticons over those faces.
:param model: Learnt emotion detection model.
:param model: Learnt emotion detection model.
...
@@ -31,21 +30,27 @@ def show_webcam_and_run(model, emoticons, window_size=(800, 800), window_name='M
...
@@ -31,21 +30,27 @@ def show_webcam_and_run(model, emoticons, window_size=(800, 800), window_name='M
:param window_name: Name of webcam image window.
:param window_name: Name of webcam image window.
:param update_time: Image update time interval.
:param update_time: Image update time interval.
"""
"""
cv2
.
namedWindow
(
window_name
,
WINDOW_NORMAL
)
cv2
.
namedWindow
(
window_name
,
cv2
.
WINDOW_NORMAL
)
if
window_size
:
if
window_size
:
width
,
height
=
window_size
width
,
height
=
window_size
cv2
.
resizeWindow
(
window_name
,
width
,
height
)
cv2
.
resizeWindow
(
window_name
,
width
,
height
)
vc
=
WebcamVideoStream
(
src
=
0
)
.
start
()
vc
=
WebcamVideoStream
()
.
start
()
frame
=
vc
.
read
()
frame
=
vc
.
read
()
puffer
=
RingBuffer
(
7
)
# Der RingBuffer speichert die letzten Predictions
while
True
:
while
True
:
for
normalized_face
,
(
x
,
y
,
w
,
h
)
in
find_faces
(
frame
):
for
normalized_face
,
(
x
,
y
,
w
,
h
)
in
find_faces
(
frame
):
prediction
=
model
.
predict
(
normalized_face
)
# do prediction
prediction
=
model
.
predict
(
normalized_face
)
# do prediction
image_to_draw
=
emoticons
[(
prediction
[
0
])]
puffer
.
append
(
prediction
[
0
])
# Speichere letzte Prediction
if
x
-
150
>
0
and
y
-
50
>
0
and
w
-
150
>
0
and
h
-
150
>
0
:
preds
=
puffer
.
get
()
# Hole Einträge als Array
draw_with_alpha
(
frame
,
image_to_draw
,
(
x
-
150
,
y
-
50
,
w
-
150
,
h
-
150
))
if
not
(
any
(
x
is
None
for
x
in
preds
)):
# Kein Eintrag im RingBuffer ist None
unique
,
counts
=
np
.
unique
(
preds
,
return_counts
=
True
)
# Vorkommen der Predictions zählen
image_to_draw
=
emoticons
[
unique
[
0
]]
# häufigster Wert wird dargestellt
draw_with_alpha
(
frame
,
image_to_draw
,
(
40
,
40
,
200
,
200
))
cv2
.
imshow
(
window_name
,
frame
)
cv2
.
imshow
(
window_name
,
frame
)
frame
=
vc
.
read
()
frame
=
vc
.
read
()
...
@@ -59,7 +64,7 @@ def show_webcam_and_run(model, emoticons, window_size=(800, 800), window_name='M
...
@@ -59,7 +64,7 @@ def show_webcam_and_run(model, emoticons, window_size=(800, 800), window_name='M
if
__name__
==
'__main__'
:
if
__name__
==
'__main__'
:
emotions
=
[
'
neutral'
,
'anger'
,
'disgust'
,
'happy
'
,
'surprise'
]
emotions
=
[
'
happy'
,
'neutral
'
,
'surprise'
]
emoticons
=
_load_emoticons
(
emotions
)
emoticons
=
_load_emoticons
(
emotions
)
# load mode
# load mode
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment