Commit bb70eef9 authored by Timothé Boulet

fix bug

parent 96cade95
data
config
test
\ No newline at end of file
[Several binary files changed in this commit have no preview; one file was added; one source diff was too large to display.]
@@ -5,13 +5,14 @@ import cv2
 from utils import *
 from config import emotions, input_shape, modelName
 
+model = keras.models.load_model(modelName) #Load our model
+print('Model used:', modelName)
 
 def detectEmotion(face):
-    #Return the most likely emotion there is on a 48x48x1 gray face
+    #Return the most likely emotion there is on a face
     face = normAndResize(face, input_shape) #Process our image for input of model
-    model = keras.models.load_model(modelName) #Load our model
     emotionVect = predir(model, face)
     emotionNbr = np.argmax(emotionVect)
     emotion = emotions[emotionNbr]
...
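The hunk above is the core of the fix: keras.models.load_model used to be called inside detectEmotion, so the model was re-read from disk on every call (every frame); hoisting the call to module level loads it once. A minimal sketch of the resulting layout, assuming normAndResize, predir, emotions, input_shape and modelName behave as the diff suggests; the explicit imports and the final return are assumptions, not the repo's exact code.

import numpy as np
from tensorflow import keras

from utils import normAndResize, predir       # helpers defined elsewhere in the repo
from config import emotions, input_shape, modelName

model = keras.models.load_model(modelName)    # loaded once, at import time
print('Model used:', modelName)

def detectEmotion(face):
    # Preprocess the face, run one forward pass, map the argmax to a label.
    face = normAndResize(face, input_shape)
    emotionVect = predir(model, face)
    return emotions[int(np.argmax(emotionVect))]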
@@ -2,6 +2,7 @@
 import cv2
 import numpy as np
 import faceAnalysis as fa
+import timeit as ti
 
 def imageProcess(image):
     #Objectives : detect faces, identify emotion associated on it, modify the image by framing faces and writing their emotions associated
@@ -35,7 +36,6 @@ def imageProcess(image):
         cv2.putText(image, emotion, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
 
-
 def selectFace(image):
     #Return a face identified on an colored image
...
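In this file only the import timeit as ti line is new, and the hunks shown do not include its use; presumably it is meant for timing the per-frame processing. A hedged sketch of how such a measurement could look; the timed helper below is hypothetical, not part of the repo.

import timeit as ti

def timed(func, *args):
    # Hypothetical helper: report how long one call takes, e.g. to confirm
    # that the model is no longer reloaded inside detectEmotion.
    start = ti.default_timer()
    result = func(*args)
    print(f"{func.__name__} took {ti.default_timer() - start:.3f}s")
    return result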
 import numpy as np
 import cv2
+import matplotlib.pyplot as plt
 
 def afficher(image):
     if len(image.shape) == 3:
@@ -11,19 +12,24 @@ def afficher(image):
     elif len(image.shape) == 2: # (h,l)
         plt.imshow(image)
 
 def predir(modele, image):
     # Return output of image from modele
-    modele.predict(np.array([image]))[0,:]
+    return modele.predict(np.array([image]))[0, :]
 
 def normAndResize(image, input_shape):
     # For an array image of shape (a,b,c) or (a,b), transform it into (h,l,p). Also normalize it.
     h, l, p = input_shape
-    image = cv2.resize(image, dsize=(h,l), interpolation=cv2.INTER_CUBIC) #resize for h and l
-    if len(image.shape) == 3 and p==1 and image.shape[2] != 1 : #if we want (h,l,3) -> (h,l,1) , we first transform it in to (h,l) (grey the image)
+    # resize for h and l
+    image = cv2.resize(image, dsize=(h, l), interpolation=cv2.INTER_CUBIC)
+
+    # if we want (h,l,3) -> (h,l,1) , we first transform it in to (h,l) (grey the image)
+    if len(image.shape) == 3 and p == 1 and image.shape[2] != 1:
         image = image.mean(2)
     image = np.reshape(image, (h, l, p)) # restore third dimension
     image = image.astype("float32")
     image = image/255 # normalisation
     return image
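The predir change is the other half of the fix: the old body computed modele.predict(...) but never returned it, so callers got None and the np.argmax step in detectEmotion could not work. A small smoke test of the fixed contract, using a throwaway Keras model with the 48x48x1 input mentioned in the old detectEmotion comment; the 7-class output is an assumption about the length of the emotions list.

import numpy as np
from tensorflow import keras

# Throwaway stand-in model: 48x48x1 grayscale input as in the old comment,
# 7 output classes assumed.
dummy = keras.Sequential([
    keras.layers.Flatten(input_shape=(48, 48, 1)),
    keras.layers.Dense(7, activation="softmax"),
])

face = np.zeros((48, 48, 1), dtype="float32")
vect = dummy.predict(np.array([face]))[0, :]   # what the fixed predir now returns
assert vect.shape == (7,)                      # one probability per emotion
print(np.argmax(vect))                         # index usable as emotions[emotionNbr]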
@@ -4,7 +4,7 @@ import imageProcess as ip
 cap = cv2.VideoCapture(0) #0 means we capture the first camera, your webcam probably
 
-while cap.isOpened():
+while cap.isOpened(): #or while 1. cap.isOpened() is false if there is a problem
     ret, frame = cap.read() #Read next video frame, stop if frame not well read
     if not ret: break
...
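The capture script itself only gains a comment, and the rest of the loop is truncated here. For context, a minimal sketch of how such an OpenCV webcam loop is usually completed around the lines shown; the window name, quit key, and cleanup calls are assumptions, not the repo's code.

import cv2
import imageProcess as ip

cap = cv2.VideoCapture(0)          # 0 = first camera, usually the webcam
while cap.isOpened():              # False if the camera could not be opened
    ret, frame = cap.read()
    if not ret:
        break
    ip.imageProcess(frame)                     # draws face boxes and emotion labels in place
    cv2.imshow("Emotion detection", frame)     # assumed window name
    if cv2.waitKey(1) & 0xFF == ord('q'):      # assumed quit key
        break
cap.release()
cv2.destroyAllWindows()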