Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Source revision: master
Target projects: automatants/facial-expression-detection, 2021romandfra/facial-expression-detection (revision: master)

Showing with 894 additions and 339 deletions
# ---- config.py ----
# Name of the model used
-modelName = 'firstModel'
+modelName = 'exp903'

# Emotions provided by the dataset
-emotions = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Suprise", "Neutral"]
+emotions = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]

# Shape of the model's input
input_shape = (48, 48, 1)
# input_shape = (64,64,1)
# ---- faceAnalysis.py ----
# The objective of this file is to analyse a face
import keras
import tensorflow as tf  # assumed import: tf is required by tf.saved_model.load below
import numpy as np
import cv2
from utils import *
from config import emotions, input_shape, modelName

print("Loading model...")
-model = keras.models.load_model("models/"+modelName)  # Load our model
+# model = tf.keras.models.load_model("models/"+modelName)  # Load our model
+model = tf.saved_model.load("models/"+modelName)
print('Model used:', modelName)

def detectEmotion(face):
......
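The body of detectEmotion is collapsed in this view. As a rough sketch only — assuming a Keras-style model and the normAndResize helper that utils provides elsewhere in this repo, not the author's actual code — it plausibly reads:

# Hypothetical sketch; the real (collapsed) body may differ.
def detectEmotion(face):
    # Normalize and resize the face to the model's input shape (48, 48, 1),
    # predict, and map the most probable class back to its emotion label.
    face = normAndResize(face, input_shape)
    predictions = model.predict(np.array([face]))  # shape (1, 7)
    return emotions[int(np.argmax(predictions))]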
# ---- game.py ----
def smileyRandom(emotionToDodge):
    # Return a random smiley and the emotion associated with it
    import cv2
    import random
    from config import emotions

    emotionNbr = random.randrange(0, 6)  # indices 0..5: the "Neutral" smiley (index 6) is never drawn
    emotion = emotions[emotionNbr]
    if emotion == emotionToDodge:
        return smileyRandom(emotion)

    smileyImagePath = "data/smileys/" + emotion + ".png"
    smiley = cv2.imread(smileyImagePath)
    return smiley, emotion
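A usage note, grounded in how game() below calls it: passing the current emotion guarantees the next smiley is different.

smiley, emotion = smileyRandom("")       # first smiley: nothing to dodge
smiley, emotion = smileyRandom(emotion)  # next smiley: guaranteed to change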
def game(playTime=30, invincibleFrame=0.5, dt_required=0.5, n_photos=None):
    # Play a game.
    #   playTime        : game duration
    #   invincibleFrame : minimum delay between two scored emotions
    #   dt_required     : minimum hold time needed to validate an emotion
    #   n_photos        : number of photos taken
    # Use your camera to process the video. Stop by pressing Q.
    import cv2
    import matplotlib.pyplot as plt
    import imageProcess as ip
    import time

    cap = cv2.VideoCapture(0)  # 0 means we capture the first camera, probably your webcam
    score = 0
    timeScoring = time.time()  # last instant an emotion was scored
    timeInitial = time.time()
    timeSinceOtherEmotions = time.time()
    timeLastPhoto = time.time()
    smiley, emotion = smileyRandom("")
    smileyNeutral = smiley.copy()
    photos = []

    while cap.isOpened():
        ret, frame = cap.read()  # Read the next video frame; stop if the frame was not read correctly
        if not ret:
            break

        emotionsList = ip.imageProcess(frame, returnEmotion=True)

        if time.time() - timeSinceOtherEmotions > dt_required:  # Emotion held for dt_required seconds: increase the score and draw a new smiley
            score += 1
            smiley, emotion = smileyRandom(emotion)
            smileyNeutral = smiley.copy()
            timeScoring = time.time()
            timeSinceOtherEmotions = time.time()
        elif emotion in emotionsList and time.time() - timeScoring > invincibleFrame:  # Target emotion recognized and invincibility window elapsed: let the hold timer run
            pass
        else:
            timeSinceOtherEmotions = time.time()

        # Annotate and show the images
        smiley = smileyNeutral.copy()
        cv2.imshow("Camera", frame)  # Show you making emotional faces
        cv2.putText(smiley, "Score: " + str(score), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        cv2.putText(smiley, "Timer: " + str(round(time.time() - timeInitial, 1)), (20, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
        cv2.imshow("Smiley", smiley)  # Show the smiley to mimic

        # Temporarily save a photo:
        if n_photos is not None:
            if time.time() - timeLastPhoto > playTime / (n_photos + 1):
                timeLastPhoto = time.time()
                photos.append(frame)

        # Stop the game if Q is pressed or if the elapsed time exceeds the play time.
        key = cv2.waitKey(1) & 0xFF  # read the keyboard once per frame (two waitKey calls would swallow key presses)
        if key == ord('q'):  # Press Q to stop the loop, and so the capture
            break
        elif key == ord('p'):  # Press P to skip the smiley, at the cost of one point
            score -= 1
            smiley, emotion = smileyRandom(emotion)
            smileyNeutral = smiley.copy()
            timeScoring = time.time()
            timeSinceOtherEmotions = time.time()
        elif time.time() - timeInitial > playTime:
            break

    cap.release()
    cv2.destroyAllWindows()

    print(f"Game over! You mimicked {score} emotions in {playTime} seconds!")
    if n_photos is not None:
        print("Here are a few photos taken during your performance =)")
        for photo in photos:
            plt.imshow(photo)
            plt.xticks([])
            plt.yticks([])
            plt.show()

if __name__ == "__main__":
    game()
# ---- imageProcess.py ----
@@ -4,10 +4,11 @@ import numpy as np
import faceAnalysis as fa
import timeit as ti

-def imageProcess(image, writeEmotion=True):
+def imageProcess(image, writeEmotion=True, writeRectangle=True, returnEmotion=False):
    # Objectives: detect faces, identify the emotion on each one, and modify the image by framing faces and writing the associated emotions
    facesList = []
+   emotionsList = []

    # Import face and eye detectors from cv2
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
@@ -22,6 +23,7 @@ def imageProcess(image, writeEmotion=True):
        x, y, w, h = face

        # Create a blue rectangle around the face, of thickness 2
+       if writeRectangle:
            cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)

        # Select the face image
@@ -33,7 +35,9 @@ def imageProcess(image, writeEmotion=True):
        if writeEmotion:
            emotion = fa.detectEmotion(face_color)
            cv2.putText(image, emotion, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
+           emotionsList.append(emotion)

+   if returnEmotion:
+       return emotionsList
    return facesList

def selectFace(image):
@@ -53,6 +57,7 @@ def selectFace(image):
    face = image[y:y+h, x:x+w]
    return face

# Some tests here.
# image = cv2.imread("cagnol.jpg", 1)  # Load the colored Cagnol image
# imageProcess(image)
# cv2.imshow("Cagnol", image)
......
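The hunk between the cascade construction and the per-face loop above is elided. For orientation, a minimal sketch of the standard OpenCV call such code relies on (parameter values here are assumptions, not the repo's):

# Assumed shape of the elided detection step (standard OpenCV API):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
for face in faces:
    x, y, w, h = face
    # ...rectangle drawing and emotion detection as above...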
# ---- AffWild loader (old revision) ----
import os
import cv2
import numpy as np  # needed for np.array below
from utils import *
import imageProcess as ip
from config import input_shape

def extractDataFromVideo_(filename, videoName, facesList, labelsList, maxNbrImages):
    # Extract every face in a specified video and add it to a list of faces, along with the corresponding labels.
    frameRate = 1/15
    emotions = ["Neutral", "Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise"]

    # Start capturing the video and reading the label file
    cap = cv2.VideoCapture(filename)
    if not cap.isOpened():
        print("Error opening video")
    file = open("data/affwild/labels/" + videoName[:-4] + '.txt', 'r')
    file.readline()

    # Read until the video is completed
    k = 0
    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        line = file.readline()
        if ret == True:
            k += 1
            if k * frameRate >= 1:  # Keep one frame out of every N, where N = 1/frameRate
                k = 0

                # Detect faces on the image
                newFaces = ip.imageProcess(frame, writeEmotion=False)

                # If several faces were detected, an error was made, since these are single-person videos.
                if len(newFaces) == 1:
                    facesList += newFaces
                    emotionNbr = emotionToNumber(emotions[int(line[0])])
                    labelsList.append(emotionNbr)
                    print("Data point added, count:", len(facesList))
                else:
                    print("Error for this data point: no face, or several faces, detected")

                # If we exceed the maximum number of images desired, stop
                if len(facesList) > maxNbrImages:
                    break

            # Press Q on the keyboard to exit
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            # Display the resulting frame
            if False:
                cv2.imshow('AffWild data extraction...', frame)

        # Break the loop
        else:
            break

    # When everything is done, release the video capture object
    cap.release()
    # Close all the frames
    cv2.destroyAllWindows()
    # Close the label file
    file.close()

    # Return the face and label lists with the new data
    return facesList, labelsList

# LOAD DATA
def loadAffwildData(maxNbrImages=10000000000):
    print(f"\nLOADING {maxNbrImages} DATA POINTS FROM AFFWILD...")
    foldername = "data/affwild/videos/"
    facesList = []
    labelsList = []
    k = 1
    nbrOfVideos = len(os.listdir(foldername))

    # For each video...
    for videoName in os.listdir(foldername):
        # If we exceed the maximum number of images desired, stop
        if len(facesList) >= maxNbrImages:
            break
        elif videoName + '_left' in os.listdir("data/affwild/labels") or videoName + '_right' in os.listdir("data/affwild/labels"):
            print("Two-face video, not taken into account")
        else:
            k += 1
            print(f"Processing {videoName}, video {k}/{nbrOfVideos}")
            filename = foldername + videoName

            # Press Q on the keyboard to skip ONE video
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            # Add the data extracted from this video to the features and labels
            facesList, labelsList = extractDataFromVideo_(
                filename, videoName, facesList, labelsList, maxNbrImages)

    # List of colored N*M*3 faces to an array of gray 48*48*1 images
    N = len(facesList)
    print(f"AFFWILD PROCESSING: processing the {N} faces detected in the AffWild videos...")
    for k in range(N):
        visage = facesList[k]
        facesList[k] = normAndResize(visage, input_shape)
    X = np.array(facesList)
    Y = np.array(labelsList)

    print(N, "data points loaded from AffWild.")
    return X, Y
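A quick sanity check on the sampling logic used in extractDataFromVideo_: with frameRate = 1/15, k * frameRate >= 1 first holds at k = 15, so exactly one frame in fifteen is kept.

frameRate = 1/15
k, kept = 0, 0
for frame_index in range(150):   # 150 frames of video
    k += 1
    if k * frameRate >= 1:       # same test as in the loader
        k = 0
        kept += 1
print(kept)                      # -> 10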
# ---- AffWild loader (new revision) ----
import os
import cv2
import numpy as np  # needed for np.array below
from utils import *
import imageProcess as ip
from config import input_shape

def extractDataFromVideo_(filename, videoName, facesList, labelsList, maxNbrImages, frameRate):
    # Extract every face in a specified video and add it to a list of faces, along with the corresponding labels.
    emotions = ["Neutral", "Angry", "Disgust",
                "Fear", "Happy", "Sad", "Surprise"]

    # Start capturing the video and reading the label file. Leave the lists unchanged if the file cannot be read.
    cap = cv2.VideoCapture(filename)
    if not cap.isOpened():
        print("Error opening video")
    try:
        file = open("data/affwild/labels/" + videoName[:-4] + '.txt', 'r')
    except FileNotFoundError:
        return facesList, labelsList
    file.readline()

    # Read until the video is completed
    k = 0
    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        line = file.readline()
        if ret == True:
            k += 1
            if k * frameRate >= 1:  # Keep one frame out of every N, where N = 1/frameRate
                k = 0

                # Detect faces on the image
                newFaces = ip.imageProcess(frame, writeEmotion=False)

                # If several faces were detected, an error was made, since these are single-person videos.
                # The second condition means the image is irrelevant (no face in the picture).
                if len(newFaces) == 1 and line[0] != '-':
                    facesList += newFaces
                    emotionNbr = emotionToNumber(emotions[int(line[0])])
                    labelsList.append(emotionNbr)
                elif False:  # error reporting disabled
                    print("Error for this data point: no face, or several faces, detected", end='\r')

                # If we exceed the maximum number of images desired, stop
                if len(facesList) > maxNbrImages:
                    break

            # Press Q on the keyboard to exit
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            # Display the resulting frame
            if False:
                cv2.imshow('AffWild data extraction...', frame)

        # Break the loop
        else:
            break

    # When everything is done, release the video capture object
    cap.release()
    # Close all the frames
    cv2.destroyAllWindows()
    # Close the label file
    file.close()

    # Return the face and label lists with the new data
    return facesList, labelsList

# LOAD DATA
def loadAffwildData(maxNbrImages=10000000000, frameRate=1/20):
    print(f"\nLOADING {maxNbrImages} DATA POINTS FROM AFFWILD...")
    foldername = "data/affwild/videos/"
    facesList = []
    labelsList = []
    maxNbrImages -= 1
    k = 0
    nbrOfVideos = len(os.listdir(foldername))

    # For each video...
    for videoName in os.listdir(foldername):
        # If we exceed the maximum number of images desired, stop
        if len(facesList) >= maxNbrImages:
            break
        elif videoName + '_left' in os.listdir("data/affwild/labels") or videoName + '_right' in os.listdir("data/affwild/labels"):
            print("Two-face video, not taken into account")
        else:
            k += 1
            print(f"Processing {videoName}, video {k}/{nbrOfVideos}")
            filename = foldername + videoName

            # Press Q on the keyboard to skip ONE video
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            # Add the data extracted from this video to the features and labels
            facesList, labelsList = extractDataFromVideo_(
                filename, videoName, facesList, labelsList, maxNbrImages, frameRate)

    # List of colored N*M*3 faces to an array of gray 48*48*1 images
    N = len(facesList)
    print(f"AFFWILD PROCESSING: processing the {N} faces detected in the AffWild videos...")
    for k in range(N):
        visage = facesList[k]
        facesList[k] = normAndResize(visage, input_shape)
    X = np.array(facesList)
    Y = np.array(labelsList)

    print(N, "data points loaded from AffWild.")
    return X, Y
\ No newline at end of file
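Both loader revisions rely on emotionToNumber, star-imported from utils and not shown in this diff, to remap the dataset-specific label order onto the fer2013 ordering from config. A minimal sketch, assuming it is a plain index lookup:

# Hypothetical sketch; utils.emotionToNumber itself is not part of this diff.
from config import emotions  # fer2013 ordering: Angry, Disgust, Fear, Happy, Sad, Surprise, Neutral

def emotionToNumber(emotion):
    # Map an emotion name to its index in the reference (fer2013) ordering.
    return emotions.index(emotion)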
# ---- ExpW loader ----
@@ -5,7 +5,7 @@ import imageProcess as ip
import numpy as np

-def loadExpWData(nbrMaxImages=float('inf'), onlyDetected=False):
+def loadExpWData(nbrMaxImages=float('inf'), onlyDetected=False, detectedFace=False, count=False):
    print(f"\nLOADING {nbrMaxImages} DATA POINTS FROM EXPW...")
    folderImages = 'data/expW/images/'
    fileLabels = 'data/expW/labels.lst'
@@ -20,11 +20,10 @@ def loadExpWData(nbrMaxImages=float('inf'), onlyDetected=False):
        if nbrImages >= nbrMaxImages:
            break
        k += 1

-       # Face extraction, according to the dataset annotations AND the face detector (cascade)
+       # Face extraction, according to the dataset annotations
        imageName, Id, top, left, right, bottom, confidence, label = line.strip().split(' ')
        image = cv2.imread(folderImages + imageName)
        faceAccordingToDS = image[int(top):int(bottom), int(left):int(right)]
-       facesDetected = ip.imageProcess(faceAccordingToDS, writeEmotion=False)

        # Visual check (optional; a bit scary without waiting 1000 ms between two images...)
        if False:
@@ -33,13 +32,16 @@ def loadExpWData(nbrMaxImages=float('inf'), onlyDetected=False):
            break

        # Add the extracted data to our dataset
        if len(facesDetected) == 1 or not onlyDetected:  # Otherwise no face, or something that is not a face, was detected
-           # Prefer the image found by the detector
-           if len(facesDetected) != 0:
+           # Select the detected face (if there is exactly one) or the face according to the dataset
+           if detectedFace:
+               facesDetected = ip.imageProcess(faceAccordingToDS, writeEmotion=False, writeRectangle=False)
+               if len(facesDetected) == 1:
                    face = facesDetected[0]
+               else:
+                   face = faceAccordingToDS
            else:
                face = faceAccordingToDS

            # Colored N*M*3 face to a gray 48*48*1 image.
            gray = normAndResize(face, input_shape)
@@ -48,13 +50,13 @@ def loadExpWData(nbrMaxImages=float('inf'), onlyDetected=False):
            Y.append(label)  # The emotion order is the same as in fer2013.
            nbrImages += 1
-       else:
-           print("Error for this data point: no face, or several faces, detected")
-
-   print(f"{nbrImages} data points loaded from expW (out of {k} processed).\n")
+       # Print the number of data points loaded, every 1000 items
+       if count and nbrImages % 1000 == 0:
+           print(f"{nbrImages} data points loaded from expW (out of {k} processed).")

    X = np.array(X)
    Y = np.array(Y)
    print('\n')
    return X, Y
......
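Each labels.lst line above unpacks into eight space-separated fields; an illustrative, made-up example, with the label read under the fer2013 ordering that the code's comment refers to:

line = "img001.jpg 0 10 20 120 130 0.99 3"   # illustrative values only
imageName, Id, top, left, right, bottom, confidence, label = line.strip().split(' ')
# label "3" -> "Happy" in the fer2013 ordering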
# ---- fer2013 loader ----
@@ -3,6 +3,8 @@ import csv
import numpy as np
import cv2
import matplotlib.pyplot as plt
+from config import input_shape
+from utils import *

def strToArray(string):  # fer2013 provides images as strings, so they need to be converted
@@ -23,7 +25,7 @@ def strToArray(string): # Fer2013 provides images as string so it needs to be t
        A.append(int(nbr))

    A = np.array(A)
-   A = np.reshape(A, (48, 48))
+   A = np.reshape(A, (48, 48, 1))

    return A
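For orientation: each fer2013 "pixels" field is one space-separated string of 48 x 48 = 2304 grayscale values, so strToArray's round-trip can be checked like this (assuming, as the loop above suggests, that it splits the string into integers):

demoString = " ".join(["128"] * (48 * 48))   # a flat gray test image in fer2013's format
demoArray = strToArray(demoString)
# With the updated reshape, demoArray.shape == (48, 48, 1)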
@@ -33,8 +35,8 @@ def strToArray(string): # Fer2013 provides images as string so it needs to be t
def loadFer2013Data(maxNbrImages=35887):
    print(f"\nLOADING {maxNbrImages} DATA POINTS FROM FER2013...")
-   nbrImagesFer2013 = 35887
-   filename = "data/fer2013.csv"
+   maxNbrImages = min(maxNbrImages, 35887)  # fer2013 contains 35887 images in total
+   filename = "data/fer2013/fer2013.csv"
    emotions = ["Angry", "Disgust", "Fear",
                "Happy", "Sad", "Surprise", "Neutral"]
@@ -54,7 +56,7 @@ def loadFer2013Data(maxNbrImages=35887):
        emotionNbr, stringImage, typeImage = row
-       X.append(strToArray(stringImage))
+       X.append(normAndResize(strToArray(stringImage), input_shape))
        Y.append(emotionNbr)

        print(f"Data point {i} of {maxNbrImages} loaded", end='\r')
......
# ---- Ravdess loader ----
@@ -5,15 +5,17 @@ import imageProcess as ip
from config import input_shape

-def extractDataFromVideo(filename, videoName, facesList, labelsList, maxNbrImages):
+def extractDataFromVideo(filename, videoName, facesList, labelsList, maxNbrImages, frameRate):
    # Extract every face in a specified video and add it to a list of faces, along with the corresponding labels.

    # Start capturing the video
-   frameRate = 1
    cap = cv2.VideoCapture(filename)
    if not cap.isOpened():
        print("Error opening video stream or file")

+   emotions_ravdess = ["_", "Neutral", "Calm", "Happy",
+                       "Sad", "Angry", "Fear", "Disgust", "Surprise"]

    # Read until the video is completed
    k = 0
    while cap.isOpened():
@@ -22,6 +24,7 @@ def extractDataFromVideo(filename, videoName, facesList, labelsList, maxNbrImage
        if ret == True:
            k += 1
            if k * frameRate >= 1:  # Keep one frame out of every N, where N = 1/frameRate
                k = 0
@@ -29,7 +32,7 @@ def extractDataFromVideo(filename, videoName, facesList, labelsList, maxNbrImage
                # The Ravdess emotion list is not in the same order as fer2013 (the reference)
                emotionNbr = int(videoName[7])
-               emotion = emotions[emotionNbr]
+               emotion = emotions_ravdess[emotionNbr]
                emotionNbr = emotionToNumber(emotion)

                # Detect faces on the image
@@ -39,18 +42,15 @@ def extractDataFromVideo(filename, videoName, facesList, labelsList, maxNbrImage
                if len(newFaces) == 1:
                    facesList += newFaces
                    labelsList.append(emotionNbr)
-               elif True: print("Error for this data point: no face, or several faces, detected")
+               else:
+                   print("Error for this data point: no face, or several faces, detected")

                # If we exceed the maximum number of images desired, stop
-               if len(facesList) > maxNbrImages:
-                   break
-
-           # Press Q on the keyboard to exit
-           if cv2.waitKey(1) & 0xFF == ord('q'):
+               if len(facesList) >= maxNbrImages:
                    break

            # Display the resulting frame
-           if True:
+           if False:
                cv2.imshow('Frame', frame)

        # Break the loop
@@ -69,18 +69,21 @@ def extractDataFromVideo(filename, videoName, facesList, labelsList, maxNbrImage

# LOAD DATA
-def loadRavdessData(maxNbrImages=10000000000):
+def loadRavdessData(maxNbrImages=10000000000, frameRate=1/20):
    print(f"\nLOADING {maxNbrImages} DATA POINTS FROM RAVDESS...")
-   foldername = "data/ravdessTest/videos/"
-   emotions = ["_", "Neutral", "Calm", "Happy",
-               "Sad", "Angry", "Fear", "Disgust", "Surprise"]
+   foldername = "data/ravdess/videos/"
    facesList = []
    labelsList = []

    # For each actor...
    for actorName in os.listdir(foldername):
+       # If we exceed the maximum number of images desired, stop
+       if len(facesList) >= maxNbrImages:
+           break
        print(f"PROCESSING ACTOR No. {actorName[-2:]}")
        videoNames = os.listdir(foldername + actorName)
        nbrOfVideos = len(videoNames)
@@ -88,6 +91,7 @@ def loadRavdessData(maxNbrImages=10000000000):
        k = 0
        # For each video...
        for videoName in videoNames:
+           # If we exceed the maximum number of images desired, stop
            if len(facesList) >= maxNbrImages:
                break
@@ -96,21 +100,19 @@ def loadRavdessData(maxNbrImages=10000000000):
            print(f"Processing {videoName}, video {k}/{nbrOfVideos}")
            filename = foldername + actorName + '/' + videoName

-           # Press Q on the keyboard to skip ONE video
-           if cv2.waitKey(1) & 0xFF == ord('q'):
-               break

            if videoName[7] == '2':
                # Do not take the Calm emotion into account
                print("'Calm' emotion, not taken into account")
-           else:
+           elif videoName[7] in [str(n) for n in range(1, 9)]:
                # Add the data extracted from this video to the features and labels
                facesList, labelsList = extractDataFromVideo(
-                   filename, videoName, facesList, labelsList, maxNbrImages)
+                   filename, videoName, facesList, labelsList, maxNbrImages, frameRate)

    # List of colored N*M*3 faces to an array of gray 48*48*1 images
    N = len(facesList)
    print(f"RAVDESS PROCESSING: processing the {N} faces detected in the Ravdess videos...")
    for k in range(N):
        visage = facesList[k]
......
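Context for videoName[7] above: RAVDESS file names encode metadata as seven dash-separated two-digit fields, the third being the emotion (01 = neutral, 02 = calm, ... 08 = surprised), so character index 7 is the second digit of the emotion code. A quick check against the list used above:

videoName = "02-01-06-01-02-01-12.mp4"  # third field "06" is the emotion
assert videoName[7] == '6'              # emotions_ravdess[6] == "Fear"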
# ---- main script ----
from game import *
from videoCapture import *

# game(playTime=40, invincibleFrame=1, dt_required=0.3, n_photos=5)
videoCapture()
\ No newline at end of file