Skip to content
Snippets Groups Projects
Commit 1665a5a3 authored by Timothé Boulet's avatar Timothé Boulet :alien:
Browse files

add loadRavdess dataset

parent cf7f9c40
Branches
No related tags found
No related merge requests found
No preview for this file type
No preview for this file type
No preview for this file type
File added
File added
No preview for this file type
%% Cell type:code id: tags:
```
#@title Imports
# %load_ext autoreload  # NOTE(review): author had to uncomment this for imports to work -- cause unknown, worth investigating
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets, layers, models, losses
import tensorflow_datasets as tfds
#from google.colab import files  # only needed when running on Google Colab
from matplotlib import image
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import random as rd
import cv2
import csv
from loadFer2013ds import *
from loadRavdessDs import *
from utils import *
# Smoke-load a small sample of each dataset: X/W are images, Y/Z labels.
# The argument caps the number of samples loaded (maxNbrImages).
X, Y = loadFer2013Data(100)
W, Z = loadRavdessData(100)
```
%% Output
Model used: firstModel
Image 1 sur 100 chargée
Image 2 sur 100 chargée
Image 3 sur 100 chargée
Image 4 sur 100 chargée
Image 5 sur 100 chargée
Image 6 sur 100 chargée
Image 7 sur 100 chargée
Image 8 sur 100 chargée
Image 9 sur 100 chargée
Image 10 sur 100 chargée
Image 11 sur 100 chargée
Image 12 sur 100 chargée
Image 13 sur 100 chargée
Image 14 sur 100 chargée
Image 15 sur 100 chargée
Image 16 sur 100 chargée
Image 17 sur 100 chargée
Image 18 sur 100 chargée
Image 19 sur 100 chargée
Image 20 sur 100 chargée
Image 21 sur 100 chargée
Image 22 sur 100 chargée
Image 23 sur 100 chargée
Image 24 sur 100 chargée
Image 25 sur 100 chargée
Image 26 sur 100 chargée
Image 27 sur 100 chargée
Image 28 sur 100 chargée
Image 29 sur 100 chargée
Image 30 sur 100 chargée
Image 31 sur 100 chargée
Image 32 sur 100 chargée
Image 33 sur 100 chargée
Image 34 sur 100 chargée
Image 35 sur 100 chargée
Image 36 sur 100 chargée
Image 37 sur 100 chargée
Image 38 sur 100 chargée
Image 39 sur 100 chargée
Image 40 sur 100 chargée
Image 41 sur 100 chargée
Image 42 sur 100 chargée
Image 43 sur 100 chargée
Image 44 sur 100 chargée
Image 45 sur 100 chargée
Image 46 sur 100 chargée
Image 47 sur 100 chargée
Image 48 sur 100 chargée
Image 49 sur 100 chargée
Image 50 sur 100 chargée
Image 51 sur 100 chargée
Image 52 sur 100 chargée
Image 53 sur 100 chargée
Image 54 sur 100 chargée
Image 55 sur 100 chargée
Image 56 sur 100 chargée
Image 57 sur 100 chargée
Image 58 sur 100 chargée
Image 59 sur 100 chargée
Image 60 sur 100 chargée
Image 61 sur 100 chargée
Image 62 sur 100 chargée
Image 63 sur 100 chargée
Image 64 sur 100 chargée
Image 65 sur 100 chargée
Image 66 sur 100 chargée
Image 67 sur 100 chargée
Image 68 sur 100 chargée
Image 69 sur 100 chargée
Image 70 sur 100 chargée
Image 71 sur 100 chargée
Image 72 sur 100 chargée
Image 73 sur 100 chargée
Image 74 sur 100 chargée
Image 75 sur 100 chargée
Image 76 sur 100 chargée
Image 77 sur 100 chargée
Image 78 sur 100 chargée
Image 79 sur 100 chargée
Image 80 sur 100 chargée
Image 81 sur 100 chargée
Image 82 sur 100 chargée
Image 83 sur 100 chargée
Image 84 sur 100 chargée
Image 85 sur 100 chargée
Image 86 sur 100 chargée
Image 87 sur 100 chargée
Image 88 sur 100 chargée
Image 89 sur 100 chargée
Image 90 sur 100 chargée
Image 91 sur 100 chargée
Image 92 sur 100 chargée
Image 93 sur 100 chargée
Image 94 sur 100 chargée
Image 95 sur 100 chargée
Image 96 sur 100 chargée
Image 97 sur 100 chargée
Image 98 sur 100 chargée
Image 99 sur 100 chargée
Image 100 sur 100 chargée
TRAITEMENT ACTEUR N°14
Traitement de 01-01-03-02-02-02-01.mp4, video 1/2
Lecture vidéo de 01-01-03-02-02-02-01.mp4
Donnée ajoutée, Images: 1 Labels: 1
%% Cell type:code id: tags:
```
```
%% Cell type:code id: tags:
```
#@title Hyperparamètres
# Emotion classes, in fer2013 label order (index == numeric label).
# NOTE(review): "Suprise" is a misspelling of "Surprise", but the same
# spelling is used throughout the project (labels, utils) -- fix globally
# or not at all, never in one place.
classes = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Suprise", "Neutral"]
Na = len(classes)  # number of emotion classes
maxNbrImagesForEachClasses = float('inf')  # no cap on images per class
h = 48  # image height (pixels)
l = 48  # image width (pixels)
p = 1   # channels -- grayscale
input_shape = (h, l, p)  # shape fed to the network (48*48*1 gray images)
epochs = 5
batch_size = 128
validation_size = 0.1  # fraction of data held out for validation
```
%% Cell type:code id: tags:
```
```
%% Cell type:code id: tags:
```
```
...@@ -4,9 +4,11 @@ import numpy as np ...@@ -4,9 +4,11 @@ import numpy as np
import faceAnalysis as fa import faceAnalysis as fa
import timeit as ti import timeit as ti
def imageProcess(image): def imageProcess(image, writeEmotion=True):
#Objectives : detect faces, identify emotion associated on it, modify the image by framing faces and writing their emotions associated #Objectives : detect faces, identify emotion associated on it, modify the image by framing faces and writing their emotions associated
facesList = []
#Import faces and eyes detectors from cv2 #Import faces and eyes detectors from cv2
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_frontalface_default.xml') face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_eye.xml') eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_eye.xml')
...@@ -25,6 +27,7 @@ def imageProcess(image): ...@@ -25,6 +27,7 @@ def imageProcess(image):
#Select face image #Select face image
face_gray = gray[y:y+h, x:x+w] face_gray = gray[y:y+h, x:x+w]
face_color = image[y:y+h, x:x+w] face_color = image[y:y+h, x:x+w]
facesList.append(face_color)
#Detect eyes on the face, create green rectangle #Detect eyes on the face, create green rectangle
eyes = eye_cascade.detectMultiScale(face_gray) eyes = eye_cascade.detectMultiScale(face_gray)
...@@ -32,9 +35,11 @@ def imageProcess(image): ...@@ -32,9 +35,11 @@ def imageProcess(image):
cv2.rectangle(face_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),1) cv2.rectangle(face_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),1)
#Write emotion on the image #Write emotion on the image
if writeEmotion:
emotion = fa.detectEmotion(face_color) emotion = fa.detectEmotion(face_color)
cv2.putText(image, emotion, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2) cv2.putText(image, emotion, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
return facesList
def selectFace(image): def selectFace(image):
#Return a face identified on an colored image #Return a face identified on an colored image
...@@ -45,7 +50,7 @@ def selectFace(image): ...@@ -45,7 +50,7 @@ def selectFace(image):
#Face detection is made on gray images #Face detection is made on gray images
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5) #This return a list of tuple locating faces on image faces = face_cascade.detectMultiScale(gray, 1.03, 5) #This return a list of tuple locating faces on image
#The face returned is the first face detected on the image (if exists) #The face returned is the first face detected on the image (if exists)
if faces != []: if faces != []:
......
...@@ -5,10 +5,6 @@ import cv2 ...@@ -5,10 +5,6 @@ import cv2
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
nbrImages = 35887
maxNbrImages = nbrImages
emotions = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Suprise", "Neutral"]
def strToArray(string): # Fer2013 provides images as string so it needs to be transformed def strToArray(string): # Fer2013 provides images as string so it needs to be transformed
A = [] A = []
lenght = len(string) lenght = len(string)
...@@ -32,12 +28,17 @@ def strToArray(string): #Fer2013 provides images as string so it needs to be tr ...@@ -32,12 +28,17 @@ def strToArray(string): #Fer2013 provides images as string so it needs to be tr
return A return A
# LOAD DATA AS ARRAY # LOAD DATA AS ARRAY
X = []
Y = []
def loadFer2013Data(maxNbrImages=35887):
c = 0
nbrImagesFer2013 = 35887
filename = "data/fer2013.csv" filename = "data/fer2013.csv"
emotions = ["Angry", "Disgust", "Fear",
"Happy", "Sad", "Suprise", "Neutral"]
X = []
Y = []
with open(filename, 'r', encoding='utf-8') as file: with open(filename, 'r', encoding='utf-8') as file:
...@@ -46,17 +47,17 @@ with open(filename,'r',encoding='utf-8') as file: ...@@ -46,17 +47,17 @@ with open(filename,'r',encoding='utf-8') as file:
i = 0 i = 0
for row in csv_reader: for row in csv_reader:
i += 1 i += 1
if i>maxNbrImages: break if i > maxNbrImages:
break
emotionNbr, stringImage, typeImage = row emotionNbr, stringImage, typeImage = row
traitement(emotionNbr, stringImage, typeImage)
X.append(strToArray(stringImage)) X.append(strToArray(stringImage))
Y.append(emotionNbr) Y.append(emotionNbr)
print(f"Image {i} sur {nbrImages} chargée", end='\r') print(f"Image {i} sur {maxNbrImages} chargée")
X = np.array(X) X = np.array(X)
Y = np.array(Y) Y = np.array(Y)
return X, Y
import os
import cv2
from utils import *
import imageProcess as ip
from config import input_shape
def extractDataFromVideo(filename, videoName, facesList, labelsList):
    """Extract every face detected in one Ravdess video.

    Appends each detected face (color image array) to facesList and the
    matching numeric label (fer2013 numbering) to labelsList, then returns
    both lists.

    filename   -- full path to the video file
    videoName  -- file basename; character 7 is the Ravdess emotion code (1-8)
    facesList  -- running list of face images, extended and returned
    labelsList -- running list of labels, extended and returned
    """
    # Ravdess emotion codes are 1-based; "_" pads index 0.
    # FIX: this list was previously resolved from an outer scope, where only
    # a fer2013-ordered `emotions` list could exist -- mislabelling most
    # Ravdess codes (or raising NameError). It is now defined locally in the
    # Ravdess order, matching the list used by loadRavdessData.
    emotions = ["_", "Neutral", "Calm", "Happy",
                "Sad", "Angry", "Fear", "Disgust", "Suprise"]

    # Start capture of the video
    print("Lecture vidéo de", videoName)
    frameRate = 1  # keep one frame out of every 1/frameRate frames
    cap = cv2.VideoCapture(filename)
    if not cap.isOpened():
        print("Error opening video stream or file")

    # Read until the video is completed
    k = 0
    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            break  # end of video (or read error)

        k += 1
        if k * frameRate >= 1:  # sample a frame each N frames, N = 1/frameRate
            k = 0

            # Map the Ravdess emotion code (digit in the file name) to the
            # fer2013 numbering used as reference by the rest of the project.
            emotionNbr = int(videoName[7])
            emotion = emotions[emotionNbr]
            emotionNbr = emotionToNumber(emotion)

            # Detect faces on the frame; no emotion overlay needed here.
            newFaces = ip.imageProcess(frame, writeEmotion=False)

            # Videos are single-person, so anything but exactly one detected
            # face means a detection error: skip the frame.
            if len(newFaces) == 1:
                facesList += newFaces
                labelsList.append(emotionNbr)
                print("Donnée ajoutée, Images:", len(
                    facesList), "Labels:", len(labelsList))
            else:
                print("Erreur pour la donnée : Aucun ou plusieurs visages détectés")

        # Press Q on keyboard to exit
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break

        # Display the resulting frame (debug view)
        cv2.imshow('Frame', frame)

    # When everything is done, release the video capture object
    cap.release()

    # Close all the frames
    cv2.destroyAllWindows()

    # Return face and label lists with the new data
    return facesList, labelsList
# LOAD DATA


def loadRavdessData(maxNbrImages=float('inf')):
    """Load faces and emotion labels from the Ravdess video dataset.

    Walks data/ravdessTest/videos/<actor>/<video>, extracts faces from every
    non-"Calm" video, converts them to input_shape gray images via
    normAndResize, and returns (X, Y) as numpy arrays.

    maxNbrImages -- rough cap on the number of faces loaded; only checked
                    between actors, so the result may overshoot it.
    """
    foldername = "data/ravdessTest/videos/"
    # Ravdess emotion codes (1-8); "_" pads index 0 since codes are 1-based.
    # NOTE(review): extractDataFromVideo also relies on a name `emotions`
    # with this exact ordering -- keep the two in sync.
    emotions = ["_", "Neutral", "Calm", "Happy",
                "Sad", "Angry", "Fear", "Disgust", "Suprise"]

    facesList = []   # face images collected from all videos
    labelsList = []  # matching numeric labels (fer2013 numbering)

    # For each actor...
    for actorName in os.listdir(foldername):

        # If we overreach the maximum number of images desired, stop.
        # (Checked once per actor only, so the cap can be exceeded.)
        if len(facesList) > maxNbrImages:
            break

        print(f"\nTRAITEMENT ACTEUR N°{actorName[-2:]}\n")
        videoNames = os.listdir(foldername+actorName)
        nbrOfVideos = len(videoNames)
        k = 0

        # For each video...
        for videoName in videoNames:
            k += 1
            print(f"Traitement de {videoName}, video {k}/{nbrOfVideos}")
            filename = foldername+actorName+'/'+videoName

            # Press Q on keyboard to exit ONE video.
            # NOTE(review): this `break` actually skips ALL remaining videos
            # of the current actor, not just the current one -- confirm intent.
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break

            if videoName[7] == '2':
                # Code 2 is "Calm", which has no fer2013 equivalent: skip it.
                print("Emotion 'Calme', non prise en compte")
            else:
                facesList, labelsList = extractDataFromVideo(
                    filename, videoName, facesList, labelsList)

    # List of colored images N*M*3 faces to array of gray images 48*48*1
    N = len(facesList)
    print(f"Traitement des {N} visages détectés sur les vidéos de Ravdess")

    for k in range(N):
        visage = facesList[k]
        facesList[k] = normAndResize(visage, input_shape)

    X = np.array(facesList)
    Y = np.array(labelsList)

    print(N, " données chargées depuis Ravdess.")

    return X, Y
#Use this file for test #Use this file for test
# Smoke-test script: load a small sample of each dataset.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets, layers, models, losses
import tensorflow_datasets as tfds
#from google.colab import files  # only needed when running on Google Colab
from matplotlib import image
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import random as rd
import cv2
import csv
from loadFer2013ds import *
from loadRavdessDs import *
from utils import *
# X/W are image arrays, Y/Z label arrays; 100 caps the samples loaded.
X, Y = loadFer2013Data(100)
W, Z = loadRavdessData(100)
\ No newline at end of file
import numpy as np import numpy as np
import cv2 import cv2
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
from config import emotions
def afficher(image): def afficher(image):
if len(image.shape) == 3: if len(image.shape) == 3:
...@@ -29,7 +30,10 @@ def normAndResize(image, input_shape): ...@@ -29,7 +30,10 @@ def normAndResize(image, input_shape):
image = image.mean(2) image = image.mean(2)
image = np.reshape(image, (h, l, p)) # restore third dimension image = np.reshape(image, (h, l, p)) # restore third dimension
image = image.astype("float32") image = image.astype("float32")
image = image/255 # normalisation image = (image/127.5)-1 # normalisation
return image return image
def emotionToNumber(emotion):
    """Return the fer2013 numeric label for an emotion name.

    Raises ValueError if the name is not one of the seven fer2013 classes
    (note the project-wide "Suprise" spelling).
    """
    labels = ("Angry", "Disgust", "Fear", "Happy", "Sad", "Suprise", "Neutral")
    return labels.index(emotion)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment