
Compare revisions


Commits on Source: 30
78 files changed: +1775 −113

Files

+3 −1
data
config
test
 No newline at end of file

.vscode/settings.json

0 → 100644
+3 −0
{
    "python.pythonPath": "C:\\Users\\timot\\AppData\\Local\\Microsoft\\WindowsApps\\python3.9.exe"
}
 No newline at end of file

README.md

0 → 100644
+15 −0
## Facial expression detection project

### Description:

The goal of this project is to build a program that detects faces and their associated emotions in an image or in a video captured in real time. Faces are detected and classified into 7 emotions: Angry, Disgust, Fear, Happy, Sad, Surprise and Neutral. A sketch of the still-image path is shown below.
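A minimal sketch of that still-image path, mirroring the (commented-out) test code in imageProcess.py; "photo.jpg" is a hypothetical input file, not part of the repository:

```python
import cv2
import imageProcess as ip

image = cv2.imread("photo.jpg")  # hypothetical example image
ip.imageProcess(image)           # frames each detected face and writes its emotion on the image
cv2.imshow("Result", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
```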

### Python files to run:

videoCapture.py: takes a video as input, processes each frame with imageProcess.py and returns the processed video. Faces are detected and classified. A minimal sketch of this loop follows.
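A rough sketch of such a per-frame loop (videoCapture.py itself is not shown in this comparison, so the actual code may differ):

```python
import cv2
import imageProcess as ip

cap = cv2.VideoCapture(0)  # 0 selects the first camera, usually the webcam
while cap.isOpened():
    ret, frame = cap.read()                # read the next frame
    if not ret:
        break
    ip.imageProcess(frame)                 # detect faces and write emotions in place
    cv2.imshow("Camera", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press Q to stop
        break
cap.release()
cv2.destroyAllWindows()
```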

game.py: starts a video capture and shows smileys to mimic as quickly as possible. A usage example follows the parameter list.
Parameters of game():
    - playTime : game duration
    - dt_required : time during which the emotion must be recognized continuously to be validated
    - n_photos : number of souvenir photos the model takes during the game, displayed at the end
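For example, a one-minute game with 3 souvenir photos (a sketch, assuming you run this from the project root so that game.py is importable):

```python
from game import game

# An emotion must be held for 0.5 s to score; 3 photos are shown at the end.
game(playTime=60, dt_required=0.5, n_photos=3)
```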
+581 −0

File added. Preview size limit exceeded; changes collapsed.

config.py

+9 −0
# Name of model used
modelName = 'exp903'

# Emotions provided by the dataset
emotions = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]

# Shape of input of the model
input_shape = (48, 48, 1)
# input_shape = (64,64,1)
faceAnalysis.py

+11 −10
#Objective of this file is to analyse a face
import tensorflow as tf		#Needed for tf.saved_model.load below
print("Loading model...")
import numpy as np
import cv2
from utils import *
from config import emotions, input_shape, modelName

#model = tf.keras.models.load_model("models/"+modelName)	#Load our model
model = tf.saved_model.load("models/"+modelName)

print('Model used:', modelName)

def detectEmotion(face):
	#Return the most likely emotion there is on a face

	face = normAndResize(face, input_shape)		#Process our image for input of model

	emotionVect = predir(model, face)			#predir and normAndResize come from utils
	emotionNbr = np.argmax(emotionVect)
	emotion = emotions[emotionNbr]
	return emotion

game.py

0 → 100644
+119 −0

def smileyRandom(emotionToDodge):
    #Return a random smiley and the emotion associated with it

    import cv2
    import random
    from config import emotions

    emotionNbr = random.randrange(len(emotions))    #Any of the 7 emotions (randrange(0,6) would never pick the last one)
    emotion = emotions[emotionNbr]
    if emotion == emotionToDodge: return smileyRandom(emotion)  #Do not propose the same smiley twice in a row
    smileyImagePath = "data/smileys/"+emotion+".png"
    smiley = cv2.imread(smileyImagePath)
    return smiley, emotion


def game(playTime = 30, invincibleFrame=0.5, dt_required=0.5, n_photos=None):
    #Play a game.

    # playTime : game duration
    # invincibleFrame : minimum delay between two emotions
    # dt_required : minimum time required to validate an emotion
    # n_photos : number of photos taken

    #Use your camera for processing the video. Stop by pressing Q
    import cv2
    import matplotlib.pyplot as plt
    import imageProcess as ip
    import time


    cap = cv2.VideoCapture(0)   #0 means we capture the first camera, your webcam probably
    score = 0       

    timeScoring = time.time()   #last instant an emotion was found.
    timeInitial = time.time()
    timeSinceOtherEmotions = time.time()
    timeLastPhoto = time.time()

    smiley, emotion = smileyRandom("")
    smileyNeutral = smiley.copy()
    photos = []

    while cap.isOpened():
        ret, frame = cap.read()  #Read next video frame, stop if frame not well read
        if not ret: break
        
        emotionsList = ip.imageProcess(frame, returnEmotion=True)
        
        
        if time.time()-timeSinceOtherEmotions > dt_required:    #Emotion maintained for dt_required seconds: increase score and generate a new smiley
            score += 1
            smiley, emotion = smileyRandom(emotion)
            smileyNeutral = smiley.copy()
            timeScoring = time.time()
            timeSinceOtherEmotions = time.time()

        elif emotion in emotionsList and time.time()-timeScoring>invincibleFrame: #Emotion currently recognized: keep the dt_required timer running
            pass

        else:
            timeSinceOtherEmotions = time.time()    #Emotion not recognized: reset the timer




        #Draw score and timer, then show both windows
        smiley = smileyNeutral.copy()
        cv2.imshow("Camera", frame)  			#Show you making emotional faces
        cv2.putText(smiley, "Score: "+str(score), (40,40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
        cv2.putText(smiley, "Timer: "+str(round(time.time()-timeInitial, 1)), (20,240), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
        cv2.imshow("Smiley", smiley)            #Show the smiley to mimic




        #Temporarily save a photo at regular intervals:
        if n_photos is not None:
            if time.time()-timeLastPhoto > playTime/(n_photos+1):
                timeLastPhoto = time.time()
                photos.append(frame)

        #Stop the game if Q is pressed or if the play time is exceeded.
        key = cv2.waitKey(1) & 0xFF				#Read the keyboard once per frame; calling waitKey twice would drop key presses
        if key == ord('q'):						#If you press Q, stop the while and so the capture
            break

        elif key == ord('p'):					#If you press P, skip the smiley but lower your score
            score -= 1
            smiley, emotion = smileyRandom(emotion)
            smileyNeutral = smiley.copy()
            timeScoring = time.time()
            timeSinceOtherEmotions = time.time()
        

        elif time.time() - timeInitial > playTime:
            break

    cap.release()
    cv2.destroyAllWindows()

    print(f"Jeu terminé ! Vous avez imité {score} emotions en {playTime} secondes !")
    if n_photos is not None:
        print("Voici quelques photos prises lors de votre performance =)")
        for photo in photos:
            plt.imshow(photo)
            plt.xticks([])
            plt.yticks([])
            plt.show()

if __name__ == "__main__":
    game()
imageProcess.py

+21 −17
@@ -2,11 +2,14 @@
import cv2
import numpy as np
import faceAnalysis as fa
import timeit as ti

def imageProcess(image, writeEmotion=True, writeRectangle=True, returnEmotion=False):
    #Objective: detect faces, identify the emotion associated with each one, and modify the image by framing the faces and writing their emotions
    
    facesList = []
    emotionsList = []

    #Import faces and eyes detectors from cv2
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_eye.xml')
@@ -20,22 +23,22 @@ def imageProcess(image):
        x,y,w,h = face
        
        #Create blue rectangle around face of thickness 2
        if writeRectangle:
            cv2.rectangle(image, (x,y), (x+w,y+h), (255,0,0), 2 )
        
        #Select face image
        face_gray = gray[y:y+h, x:x+w]
        face_color = image[y:y+h, x:x+w]
        
        #Detect eyes on the face, create green rectangle
        eyes = eye_cascade.detectMultiScale(face_gray)
        for (ex,ey,ew,eh) in eyes:
            cv2.rectangle(face_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),1)
        facesList.append(face_color)

        #Write emotion on the image
        if writeEmotion:
            emotion = fa.detectEmotion(face_color)
            cv2.putText(image, emotion, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
            emotionsList.append(emotion)


    if returnEmotion: return emotionsList
    return facesList

def selectFace(image):
    #Return a face identified on a colored image
@@ -46,7 +49,7 @@ def selectFace(image):
    #Face detection is made on gray images
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    faces = face_cascade.detectMultiScale(gray, 1.03, 5) #This returns a list of tuples locating faces on the image
    
    #The face returned is the first face detected on the image (if it exists)
    if len(faces) > 0:
@@ -54,9 +57,10 @@ def selectFace(image):
        face = image[y:y+h, x:x+w]
        return face

#Some tests here.
# image = cv2.imread("cagnol.jpg", 1)  #Load Cagnol colored image
# imageProcess(image)
# cv2.imshow("Cagnol", image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()

loadAffwildDS.py

0 → 100644
+133 −0
import os
import cv2
import numpy as np
from utils import *
import imageProcess as ip
from config import input_shape


def extractDataFromVideo_(filename, videoName, facesList, labelsList, maxNbrImages, frameRate):
    # Extract every face from a specified video and append it to the faces list, along with the corresponding labels.
    emotions = ["Neutral", "Angry", "Disgust",
                "Fear", "Happy", "Sad", "Surprise"]

    # Start capture of a video and reading of a label file. Don't change the lists if the file cannot be read.
    cap = cv2.VideoCapture(filename)
    if not cap.isOpened():
        print("Error opening video")

    try:
        file = open("data/affwild/labels/"+videoName[:-4]+'.txt', 'r')
    except FileNotFoundError:
        return facesList, labelsList
    file.readline()

    # Read until video is completed
    k = 0
    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        line = file.readline()

        if ret:
            k += 1

            if k*frameRate >= 1:  # Read one frame every N frames, where N = 1/frameRate
                k = 0

                # Load image and labels

                # Detect faces on the image
                newFaces = ip.imageProcess(frame, writeEmotion=False)

                # If several faces were detected, an error was made, since these are single-person videos.
                # The second condition means the frame is irrelevant (no usable label, e.g. no face on the picture)
                if len(newFaces) == 1 and line[0] != '-':
                    facesList += newFaces

                    emotionNbr = emotionToNumber(emotions[int(line[0])])
                    labelsList.append(emotionNbr)
                elif False:  # Debug output, disabled
                    print("Data error: no face or several faces detected", end='\r')

                # If we overreach the maximum number of images desired, stop
                if len(facesList) > maxNbrImages:
                    break

            # Press Q on keyboard to  exit
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            # Display the resulting frame (debug output, disabled)
            if False:
                cv2.imshow('AffWild data extraction...', frame)

        # Break the loop
        else:
            break

    # When everything done, release the video capture object
    cap.release()

    # Closes all the frames
    cv2.destroyAllWindows()

    # Close file
    file.close()

    # Return face and label lists including the new data
    return facesList, labelsList


# LOAD DATA

def loadAffwildData(maxNbrImages=10000000000, frameRate=1/20):
    print(f"\nLOADING {maxNbrImages} SAMPLES FROM AFFWILD...")

    foldername = "data/affwild/videos/"
    facesList = []
    labelsList = []
    maxNbrImages -= 1
    k = 0
    nbrOfVideos = len(os.listdir(foldername))

    # For each video...
    for videoName in os.listdir(foldername):

        # If we overreach the maximum number of images desired, stop
        if len(facesList) >= maxNbrImages:
            break

        elif videoName[:-4]+'_left.txt' in os.listdir("data/affwild/labels") or videoName[:-4]+'_right.txt' in os.listdir("data/affwild/labels"):
            # Label files are named '<name>_left.txt' / '<name>_right.txt' for two-person videos
            print("Video with two faces, skipped")

        else:
            k += 1
            print(f"Traitement de {videoName}, video {k}/{nbrOfVideos}")
            filename = foldername+videoName

            # Press Q on keyboard to exit ONE video
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            # Add datas extracted from the specified video to features and labels
            facesList, labelsList = extractDataFromVideo_(
                filename, videoName, facesList, labelsList, maxNbrImages, frameRate)

    # Convert the list of colored N*M*3 faces into an array of gray 48*48*1 images
    N = len(facesList)
    print(
        f"AFFWILD PROCESSING: processing the {N} faces detected in the AffWild videos...")

    for k in range(N):
        visage = facesList[k]
        facesList[k] = normAndResize(visage, input_shape)
    X = np.array(facesList)

    Y = np.array(labelsList)

    print(N, "données chargées depuis AffWild.")
    return X, Y
 No newline at end of file