Commit 011d6525 authored by Timothé Boulet

data loaders debugged

parent 2b07760b
Showing 178 additions and 110 deletions
{
"python.pythonPath": "C:\\Users\\timot\\AppData\\Local\\Microsoft\\WindowsApps\\python.exe"
}
\ No newline at end of file
Eight new files added without preview; two source diffs were too large to be displayed inline.
@@ -4,7 +4,7 @@ import numpy as np
 import faceAnalysis as fa
 import timeit as ti
-def imageProcess(image, writeEmotion=True):
+def imageProcess(image, writeEmotion=True, writeRectangle=True):
 #Objectives : detect faces, identify emotion associated on it, modify the image by framing faces and writing their emotions associated
 facesList = []
@@ -22,6 +22,7 @@ def imageProcess(image, writeEmotion=True):
 x,y,w,h = face
 #Create blue rectangle around face of thickness 2
+if writeRectangle:
 cv2.rectangle(image, (x,y), (x+w,y+h), (255,0,0), 2 )
 #Select face image
...
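The new writeRectangle flag makes it possible to run the detector without drawing on the input image. A minimal usage sketch, not part of the commit, assuming imageProcess returns the list of detected face crops (as its call in loadExpWData below suggests); the input path is hypothetical:

import cv2
import imageProcess as ip

image = cv2.imread("some_photo.jpg")  # hypothetical input path
# Detect faces without annotating the image: no emotion text, no rectangle
faces = ip.imageProcess(image, writeEmotion=False, writeRectangle=False)
print(f"{len(faces)} face(s) detected, input image left untouched")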
@@ -79,6 +79,9 @@ def extractDataFromVideo_(filename, videoName, facesList, labelsList, maxNbrImag
 return facesList, labelsList
 # LOAD DATA
 def loadAffwildData(maxNbrImages=10000000000, frameRate=1/20):
...
@@ -5,7 +5,7 @@ import imageProcess as ip
 import numpy as np
-def loadExpWData(nbrMaxImages=float('inf'), onlyDetected=False):
+def loadExpWData(nbrMaxImages=float('inf'), onlyDetected=False, detectedFace=False):
 print(f"\nCHARGEMENT DE {nbrMaxImages} DONNEES DEPUIS EXPW...")
 folderImages = 'data/expW/images/'
 fileLabels = 'data/expW/labels.lst'
@@ -24,7 +24,7 @@ def loadExpWData(nbrMaxImages=float('inf'), onlyDetected=False):
 imageName, Id, top, left, right, bottom, cofidence, label = line.strip().split(' ')
 image = cv2.imread(folderImages+imageName)
 faceAccordingToDS = image[int(top):int(bottom), int(left):int(right)]
-facesDetected = ip.imageProcess(faceAccordingToDS, writeEmotion=False)
+facesDetected = ip.imageProcess(faceAccordingToDS, writeEmotion=False, writeRectangle=False)
 #Suivi visuel (facultatif, fait un peu peur sans attendre 1000ms entre deux images...)
 if False:
@@ -36,7 +36,7 @@ def loadExpWData(nbrMaxImages=float('inf'), onlyDetected=False):
 if len(facesDetected) == 1 or not onlyDetected: #Otherwise no face were detected or a no-face was detected as face
 #Select in priority image detected by detector
-if len(facesDetected) != 0:
+if len(facesDetected) != 0 and detectedFace:
 face = facesDetected[0]
 else:
 face = faceAccordingToDS
@@ -48,13 +48,12 @@ def loadExpWData(nbrMaxImages=float('inf'), onlyDetected=False):
 Y.append(label) #Emotion order is the same as fer2013.
 nbrImages += 1
-else: print("Erreur pour la donnée : Aucun ou plusieurs visages détectés")
-print(f"{nbrImages} données chargées depuis expW (sur {k} données traités).\n")
+print(f"{nbrImages} données chargées depuis expW (sur {k} données traités).", end='\r')
 X = np.array(X)
 Y = np.array(Y)
+print('\n')
 return X, Y
...
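The replaced summary print uses end='\r' so the progress message overwrites itself on a single console line, with a final print('\n') to release the line. A standalone sketch of that pattern, independent of the ExpW data:

import time

total = 50
for k in range(1, total + 1):
    time.sleep(0.01)  # stand-in for loading one sample
    # Carriage return: each message overwrites the previous one
    print(f"{k} données chargées depuis expW (sur {total} données traités).", end='\r')
print('\n')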
@@ -36,7 +36,7 @@ def loadFer2013Data(maxNbrImages=35887):
 print(f"\nCHARGEMENT DE {maxNbrImages} DONNEES DEPUIS FER2013 ...")
 maxNbrImages = min(maxNbrImages, 35887)
-filename = "data/fer2013.csv"
+filename = "data/fer2013/fer2013.csv"
 emotions = ["Angry", "Disgust", "Fear",
 "Happy", "Sad", "Suprise", "Neutral"]
...
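The loader now reads data/fer2013/fer2013.csv instead of data/fer2013.csv. As an illustration only (not the project's loader code), this is how one row of the standard Kaggle fer2013.csv can be decoded, assuming the usual emotion,pixels,Usage columns with 48x48 space-separated grey levels:

import csv
import numpy as np

with open("data/fer2013/fer2013.csv") as f:
    reader = csv.DictReader(f)
    row = next(reader)
    label = int(row["emotion"])  # 0..6, same order as the emotions list above
    face = np.array(row["pixels"].split(), dtype=np.uint8).reshape(48, 48)
    print(label, face.shape)  # e.g. 0 (48, 48)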
@@ -5,15 +5,17 @@ import imageProcess as ip
 from config import input_shape
-def extractDataFromVideo(filename, videoName, facesList, labelsList, maxNbrImages):
+def extractDataFromVideo(filename, videoName, facesList, labelsList, maxNbrImages, frameRate):
 # Extract every faces in a specified video and add it to a list of faces as well as labels corresponding.
 # Start capture of a video
-frameRate = 1
 cap = cv2.VideoCapture(filename)
 if (cap.isOpened() == False):
 print("Error opening video stream or file")
+emotions_ravdess = ["_", "Neutral", "Calm", "Happy",
+"Sad", "Angry", "Fear", "Disgust", "Suprise"]
 # Read until video is completed
 k = 0
 while (cap.isOpened()):
@@ -21,9 +23,7 @@ def extractDataFromVideo(filename, videoName, facesList, labelsList, maxNbrImage
 ret, frame = cap.read()
 if ret == True:
 k += 1
-# If we overreach the maximum number of images desired, stop
-if len(facesList) > maxNbrImages:
-break
 if k*frameRate >= 1: # Read a frame each N frames where N=1/frameRate
 k = 0
@@ -32,7 +32,7 @@ def extractDataFromVideo(filename, videoName, facesList, labelsList, maxNbrImage
 # Ravdess emotions list is not in the same order as fer2013 (reference)
 emotionNbr = int(videoName[7])
-emotion = emotions[emotionNbr]
+emotion = emotions_ravdess[emotionNbr]
 emotionNbr = emotionToNumber(emotion)
 # Detect faces on the image
@@ -45,8 +45,8 @@ def extractDataFromVideo(filename, videoName, facesList, labelsList, maxNbrImage
 elif True:
 print("Erreur pour la donnée : Aucun ou plusieurs visages détectés")
-# Press Q on keyboard to exit
-if cv2.waitKey(1) & 0xFF == ord('q'):
+# If we overreach the maximum number of images desired, stop
+if len(facesList) >= maxNbrImages:
 break
 # Display the resulting frame
@@ -69,19 +69,21 @@ def extractDataFromVideo(filename, videoName, facesList, labelsList, maxNbrImage
 # LOAD DATA
-def loadRavdessData(maxNbrImages=10000000000):
+def loadRavdessData(maxNbrImages=10000000000, frameRate = 1/20):
 print(f"\nCHARGEMENT DE {maxNbrImages} DONNEES DEPUIS RAVDESS...")
-foldername = "data/ravdessTest/videos/"
+foldername = "data/ravdess/videos/"
-emotions = ["_", "Neutral", "Calm", "Happy",
-"Sad", "Angry", "Fear", "Disgust", "Suprise"]
 facesList = []
 labelsList = []
-maxNbrImages -= 1
 # For each actor...
 for actorName in os.listdir(foldername):
+# If we overreach the maximum number of images desired, stop
+if len(facesList) >= maxNbrImages:
+break
 print(f"TRAITEMENT ACTEUR N°{actorName[-2:]}")
 videoNames = os.listdir(foldername+actorName)
 nbrOfVideos = len(videoNames)
@@ -89,6 +91,7 @@ def loadRavdessData(maxNbrImages=10000000000):
 k = 0
 # For each video...
 for videoName in videoNames:
 # If we overreach the maximum number of images desired, stop
 if len(facesList) >= maxNbrImages:
 break
@@ -97,17 +100,14 @@ def loadRavdessData(maxNbrImages=10000000000):
 print(f"Traitement de {videoName}, video {k}/{nbrOfVideos}")
 filename = foldername+actorName+'/'+videoName
-# Press Q on keyboard to exit ONE video
-if cv2.waitKey(1) & 0xFF == ord('q'):
-break
 if videoName[7] == '2':
 # Doesnt take Calm emotion into account
 print("Emotion 'Calme', non prise en compte")
-else:
+elif videoName[7] in [str(n) for n in range(1, 9)]:
 # Add datas extracted from the specified video to features and labels
 facesList, labelsList = extractDataFromVideo(
-filename, videoName, facesList, labelsList, maxNbrImages)
+filename, videoName, facesList, labelsList, maxNbrImages, frameRate)
 # List of colored images N*M*3 faces to array of gray images 48*48*1
 N = len(facesList)
...
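frameRate is now passed from loadRavdessData (default 1/20) down to extractDataFromVideo instead of being hard-coded to 1. The sampling rule keeps one frame out of every 1/frameRate frames read. A standalone sketch of that rule, outside any cv2.VideoCapture loop:

frameRate = 1/20
k = 0
kept = []
for frame_index in range(100):  # stand-in for 100 successful cap.read() calls
    k += 1
    if k * frameRate >= 1:  # read a frame each N frames where N = 1/frameRate
        k = 0
        kept.append(frame_index)
print(len(kept), kept)  # 5 frames kept: [19, 39, 59, 79, 99]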
-#Use this file for test
-import tensorflow as tf
-from tensorflow import keras
-from tensorflow.keras import datasets, layers, models, losses
-import tensorflow_datasets as tfds
-#from google.colab import files
-from matplotlib import image
-import os
 import numpy as np
-import matplotlib.pyplot as plt
-import matplotlib
-import random as rd
-import cv2
-import csv
-from loadFer2013DS import *
-from loadRavdessDS import *
-from loadExpWDS import *
-from loadAffwild import *
-from utils import *
-X, Y = loadFer2013Data(10)
-W, Z = loadRavdessData(10)
-A, B = loadExpWData(10)
-C, D = loadAffwildData(10)
\ No newline at end of file
+A = np.array([ [[1,2],[3,4]] , [[5,6],[7,8]] , [[1,2],[3,4]]])
+B = np.array([ [[1,2],[3,4]] , [[5,6],[7,8]] ])
+A = A.tolist()
+B = B.tolist()
+C = np.stack(A+B, axis = 0)
+print(C.shape)
\ No newline at end of file
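The numpy snippet in this test file prototypes the list-and-restack trick that stackImages (added to utils below) relies on: arrays with different leading dimensions are turned into lists, concatenated, then stacked back into one array. A small check of the same idea, with the direct NumPy equivalent as an observation (not code from the commit):

import numpy as np

A = np.zeros((3, 2, 2))  # e.g. 3 images of shape 2x2
B = np.zeros((2, 2, 2))  # e.g. 2 images of shape 2x2

# The script's approach: convert to lists, concatenate, stack again
C = np.stack(A.tolist() + B.tolist(), axis=0)
print(C.shape)  # (5, 2, 2)

# Equivalent without the Python-list round trip
D = np.concatenate([A, B], axis=0)
print(D.shape)  # (5, 2, 2)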
@@ -42,8 +42,14 @@ def emotionToNumber(emotion):
 "Happy", "Sad", "Suprise", "Neutral"]
 return emotions.index(emotion)
+def stackImages(listOfArrayImage):
+liste = []
+for X in listOfArrayImage:
+liste += X.tolist()
+return np.stack(liste, axis=0)
-def mergeToDatabase(listOfX, listOfY, validation_repart=[0.025, 0.025, 0.025, 0.025]):
+def mergeToDatabase(listOfX, listOfY, validation_repart=[0.1, 0.1, 0.1, 0.1]):
 # This shuffle each X, extract validation data, merge differents X, shuffle again.
 listOfX_train, listOfY_train = [], []
 listOfX_test, listOfY_test = [], []
@@ -55,18 +61,18 @@ def mergeToDatabase(listOfX, listOfY, validation_repart=[0.025, 0.025, 0.025, 0.
 shuffler = np.random.permutation(N)
 X, Y = X[shuffler], Y[shuffler]
 # Extract validation data
-X_train, Y_train = X[:N*(1-rate)], Y[:N*(1-rate)]
-X_test, Y_test = X[N*(1-rate):], Y[N*(1-rate):]
+X_train, Y_train = X[:int(N*(1-rate))], Y[:int(N*(1-rate))]
+X_test, Y_test = X[int(N*(1-rate)):], Y[int(N*(1-rate)):]
 listOfX_train.append(X_train)
 listOfY_train.append(Y_train)
 listOfX_test.append(X_test)
 listOfY_test.append(Y_test)
 # Merge
-BigX_train = np.stack(listOfX_train)
-BigY_train = np.stack(listOfY_train)
-BigX_test = np.stack(listOfX_test)
-BigY_test = np.stack(listOfY_test)
+BigX_train = stackImages(listOfX_train)
+BigY_train = stackImages(listOfY_train)
+BigX_test = stackImages(listOfX_test)
+BigY_test = stackImages(listOfY_test)
 # Shuffle the whole
 shuffler = np.random.permutation(len(BigX_train))
 BigX_train = BigX_train[shuffler]
...
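The int() casts address a real failure: NumPy does not accept non-integer slice bounds, so X[:N*(1-rate)] with a float bound raises an error rather than truncating. A minimal reproduction of the problem and of the fix applied above:

import numpy as np

X = np.arange(10)
N, rate = len(X), 0.1

try:
    X_train = X[:N * (1 - rate)]  # float slice bound -> error in recent NumPy
except (TypeError, IndexError) as e:
    print("fails:", e)

X_train = X[:int(N * (1 - rate))]  # cast to int, as in the commit
X_test = X[int(N * (1 - rate)):]
print(len(X_train), len(X_test))  # 9 1

The switch from np.stack to stackImages matters for the same reason illustrated after the test script above: the per-dataset arrays have different lengths, so they must be concatenated along axis 0 rather than stacked along a new axis.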