Compare revisions: facial-expression-detection

Changes are shown as if the source revision was being merged into the target revision. Source and target revisions are both master.
Binary image files in this comparison:
  • smileys/Angry.png (50.1 KiB)
  • smileys/Disgust.png (54.8 KiB)
  • smileys/Disgust2.png (56.9 KiB)
  • smileys/Fear.png (63 KiB)
  • smileys/Happy.png (56.6 KiB)
  • smileys/Neutral.png (51.4 KiB)
  • smileys/Sad.png (58.3 KiB)
  • smileys/Surprise.png (56 KiB)

# Use this file for testing
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets, layers, models, losses
import tensorflow_datasets as tfds
#from google.colab import files
from matplotlib import image
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import random as rd
import cv2
import csv
from loadFer2013DS import *
from loadRavdessDS import *
from loadExpWDS import *
from loadAffwild import *
from utils import *
# X, Y = loadFer2013Data(10)
# W, Z = loadRavdessData(10)
# A, B = loadExpWData(10)
C, D = loadAffwildData(1000)
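
A quick, purely illustrative sanity check of what a loader returns, assuming each loader yields a pair of NumPy arrays (images, labels) as the rest of the code suggests; the shapes in the comments are assumptions, not values from the real dataset:

print(C.shape, D.shape)  # e.g. (1000, h, l, p) images and their labels (assumed shapes)
afficher(C[0])           # display the first loaded image with the helper from utils
print(D[0])              # and its label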
while 1:
    if cv2.waitKey(0) & 0xFF == ord('q'):  # If you press Q, stop the while loop and so the capture
        break
    if cv2.waitKey(1) & 0xFF == ord('p'):  # If you press P, skip the smiley but lower your score
        score -= 1
        smiley, emotion = smileyRandom(emotion)
    print(1)
    cv2.waitKey(2)
@@ -2,6 +2,8 @@ import numpy as np
import cv2
import matplotlib.pyplot as plt
from config import emotions
import tensorflow as tf


def afficher(image):
    if len(image.shape) == 3:
@@ -16,14 +18,15 @@ def afficher(image):
def predir(modele, image):
    # Return the model's output for a single image
    # Calling the model directly is faster than predict() for a single sample
    # return modele.predict(np.array([image]))[0, :]
    return modele(np.array([image]))[0, :]


def normAndResize(image, input_shape):
    # For an array image of shape (a,b,c) or (a,b), transform it into (h,l,p). Also normalize it.
    h, l, p = input_shape

    # Resize for h and l
    # print(image.shape)
    image = cv2.resize(image, dsize=(h, l), interpolation=cv2.INTER_CUBIC)

    # If we want (h,l,3) -> (h,l,1), we first transform it into (h,l) (grey the image)
    if len(image.shape) == 3 and p == 1 and image.shape[2] != 1:
@@ -34,6 +37,50 @@ def normAndResize(image, input_shape):
    return image
def emotionToNumber(emotion):
    emotions = ["Angry", "Disgust", "Fear",
                "Happy", "Sad", "Suprise", "Neutral"]
    return emotions.index(emotion)
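
For illustration only, how emotionToNumber might feed a one-hot label vector; the one-hot step is an assumption about how labels are encoded elsewhere, not something shown in this diff:

index = emotionToNumber("Happy")  # -> 3 with the list above
one_hot = np.eye(7)[index]        # -> [0., 0., 0., 1., 0., 0., 0.]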
def stackImages(listOfArrayImage):
    liste = []
    for X in listOfArrayImage:
        liste += X.tolist()
    return np.stack(liste, axis=0)
def mergeToDatabase(listOfX, listOfY, validation_repart=[0.1, 0.1, 0.1, 0.1]):
    # Shuffle each X, extract its validation data, merge the different X, then shuffle again.
    listOfX_train, listOfY_train = [], []
    listOfX_test, listOfY_test = [], []

    for X, Y, rate in zip(listOfX, listOfY, validation_repart):
        N = X.shape[0]

        # Shuffle each X and Y the same way
        shuffler = np.random.permutation(N)
        X, Y = X[shuffler], Y[shuffler]

        # Extract validation data
        X_train, Y_train = X[:int(N*(1-rate))], Y[:int(N*(1-rate))]
        X_test, Y_test = X[int(N*(1-rate)):], Y[int(N*(1-rate)):]

        listOfX_train.append(X_train)
        listOfY_train.append(Y_train)
        listOfX_test.append(X_test)
        listOfY_test.append(Y_test)

    # Merge
    BigX_train = stackImages(listOfX_train)
    BigY_train = stackImages(listOfY_train)
    BigX_test = stackImages(listOfX_test)
    BigY_test = stackImages(listOfY_test)

    # Shuffle the whole dataset
    shuffler = np.random.permutation(len(BigX_train))
    BigX_train = BigX_train[shuffler]
    BigY_train = BigY_train[shuffler]

    shuffler = np.random.permutation(len(BigX_test))
    BigX_test = BigX_test[shuffler]
    BigY_test = BigY_test[shuffler]

    return BigX_train, BigY_train, BigX_test, BigY_test
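
A minimal usage sketch of mergeToDatabase with synthetic data; the 48x48 grayscale shape and the 7-class one-hot labels are assumptions for illustration, not values taken from the real loaders:

Xa, Ya = np.random.rand(100, 48, 48, 1), np.eye(7)[np.random.randint(0, 7, 100)]
Xb, Yb = np.random.rand(50, 48, 48, 1), np.eye(7)[np.random.randint(0, 7, 50)]

X_train, Y_train, X_test, Y_test = mergeToDatabase(
    [Xa, Xb], [Ya, Yb], validation_repart=[0.1, 0.2])
# With these rates, roughly 90 + 40 samples end up in train and 10 + 10 in test.
print(X_train.shape, X_test.shape)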
def videoCapture():
    # Use your camera for processing the video. Stop by pressing Q

import cv2
import imageProcess as ip
@@ -10,7 +14,7 @@ while cap.isOpened():  # or while 1. cap.isOpened() is False if there is a problem
    ip.imageProcess(frame)  # Process the frame
    cv2.imshow("Image", frame)  # Show the processed image in a window

    if cv2.waitKey(1) & 0xFF == ord('q'):  # If you press Q, stop the while loop and so the capture
        break
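
The hunk above shows only the middle of the capture loop; for context, a minimal sketch of how such an OpenCV loop is typically set up (the cap handle, the frame read, and the release are assumptions, not part of this diff):

import cv2
import imageProcess as ip

cap = cv2.VideoCapture(0)  # open the default camera (assumed device index)
while cap.isOpened():
    ret, frame = cap.read()  # grab one frame; ret is False when reading fails
    if not ret:
        break
    ip.imageProcess(frame)  # process the frame
    cv2.imshow("Image", frame)  # show the processed image
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press Q to stop
        break
cap.release()
cv2.destroyAllWindows()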