diff --git a/__pycache__/config.cpython-38.pyc b/__pycache__/config.cpython-38.pyc
index 2511b0a724b8a00cd71c25ba1527a5e5b063c1fa..471c95965035cdce13edb474b59abfc461665a7b 100644
Binary files a/__pycache__/config.cpython-38.pyc and b/__pycache__/config.cpython-38.pyc differ
diff --git a/__pycache__/faceAnalysis.cpython-38.pyc b/__pycache__/faceAnalysis.cpython-38.pyc
index 9596825d0e8f78890bb75cb2ab44f7d1cd332dbf..c2abc466fece5a4f3e797496771b33cc71240755 100644
Binary files a/__pycache__/faceAnalysis.cpython-38.pyc and b/__pycache__/faceAnalysis.cpython-38.pyc differ
diff --git a/__pycache__/imageProcess.cpython-38.pyc b/__pycache__/imageProcess.cpython-38.pyc
index e472b46d427044482fe50ccf851170e7656a01a2..421798d2bd14fb0373f5881855f092994202292c 100644
Binary files a/__pycache__/imageProcess.cpython-38.pyc and b/__pycache__/imageProcess.cpython-38.pyc differ
diff --git a/__pycache__/loadFer2013ds.cpython-38.pyc b/__pycache__/loadFer2013ds.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..08fa3949ccfa1ea775d20f33b432ec244d74ec5e
Binary files /dev/null and b/__pycache__/loadFer2013ds.cpython-38.pyc differ
diff --git a/__pycache__/loadRavdessDs.cpython-38.pyc b/__pycache__/loadRavdessDs.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e58c4aa7cbfe2ee2e1fbb4fa4c064806e15caa57
Binary files /dev/null and b/__pycache__/loadRavdessDs.cpython-38.pyc differ
diff --git a/__pycache__/utils.cpython-38.pyc b/__pycache__/utils.cpython-38.pyc
index 7cc32c3a9dd6cc08a3f8747f0e523c76e61937a7..eb8bbc6317920b51516da144ef65c317a32d9963 100644
Binary files a/__pycache__/utils.cpython-38.pyc and b/__pycache__/utils.cpython-38.pyc differ
diff --git a/buildEmotionModel.ipynb b/buildEmotionModel.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..c5b961d428e901a5cd982ef5b808ae04d7866d9a
--- /dev/null
+++ b/buildEmotionModel.ipynb
@@ -0,0 +1,212 @@
+{
+ "metadata": {
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.5-final"
+  },
+  "orig_nbformat": 2,
+  "kernelspec": {
+   "name": "python385jvsc74a57bd031f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6",
+   "display_name": "Python 3.8.5 64-bit"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2,
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "Model used: firstModel\n",
+      "Image 1 sur 100 chargée\n",
+      "Image 2 sur 100 chargée\n",
+      "Image 3 sur 100 chargée\n",
+      "Image 4 sur 100 chargée\n",
+      "Image 5 sur 100 chargée\n",
+      "Image 6 sur 100 chargée\n",
+      "Image 7 sur 100 chargée\n",
+      "Image 8 sur 100 chargée\n",
+      "Image 9 sur 100 chargée\n",
+      "Image 10 sur 100 chargée\n",
+      "Image 11 sur 100 chargée\n",
+      "Image 12 sur 100 chargée\n",
+      "Image 13 sur 100 chargée\n",
+      "Image 14 sur 100 chargée\n",
+      "Image 15 sur 100 chargée\n",
+      "Image 16 sur 100 chargée\n",
+      "Image 17 sur 100 chargée\n",
+      "Image 18 sur 100 chargée\n",
+      "Image 19 sur 100 chargée\n",
+      "Image 20 sur 100 chargée\n",
+      "Image 21 sur 100 chargée\n",
+      "Image 22 sur 100 chargée\n",
+      "Image 23 sur 100 chargée\n",
+      "Image 24 sur 100 chargée\n",
+      "Image 25 sur 100 chargée\n",
+      "Image 26 sur 100 chargée\n",
+      "Image 27 sur 100 chargée\n",
+      "Image 28 sur 100 chargée\n",
+      "Image 29 sur 100 chargée\n",
+      "Image 30 sur 100 chargée\n",
+      "Image 31 sur 100 chargée\n",
+      "Image 32 sur 100 chargée\n",
+      "Image 33 sur 100 chargée\n",
+      "Image 34 sur 100 chargée\n",
+      "Image 35 sur 100 chargée\n",
+      "Image 36 sur 100 chargée\n",
+      "Image 37 sur 100 chargée\n",
+      "Image 38 sur 100 chargée\n",
+      "Image 39 sur 100 chargée\n",
+      "Image 40 sur 100 chargée\n",
+      "Image 41 sur 100 chargée\n",
+      "Image 42 sur 100 chargée\n",
+      "Image 43 sur 100 chargée\n",
+      "Image 44 sur 100 chargée\n",
+      "Image 45 sur 100 chargée\n",
+      "Image 46 sur 100 chargée\n",
+      "Image 47 sur 100 chargée\n",
+      "Image 48 sur 100 chargée\n",
+      "Image 49 sur 100 chargée\n",
+      "Image 50 sur 100 chargée\n",
+      "Image 51 sur 100 chargée\n",
+      "Image 52 sur 100 chargée\n",
+      "Image 53 sur 100 chargée\n",
+      "Image 54 sur 100 chargée\n",
+      "Image 55 sur 100 chargée\n",
+      "Image 56 sur 100 chargée\n",
+      "Image 57 sur 100 chargée\n",
+      "Image 58 sur 100 chargée\n",
+      "Image 59 sur 100 chargée\n",
+      "Image 60 sur 100 chargée\n",
+      "Image 61 sur 100 chargée\n",
+      "Image 62 sur 100 chargée\n",
+      "Image 63 sur 100 chargée\n",
+      "Image 64 sur 100 chargée\n",
+      "Image 65 sur 100 chargée\n",
+      "Image 66 sur 100 chargée\n",
+      "Image 67 sur 100 chargée\n",
+      "Image 68 sur 100 chargée\n",
+      "Image 69 sur 100 chargée\n",
+      "Image 70 sur 100 chargée\n",
+      "Image 71 sur 100 chargée\n",
+      "Image 72 sur 100 chargée\n",
+      "Image 73 sur 100 chargée\n",
+      "Image 74 sur 100 chargée\n",
+      "Image 75 sur 100 chargée\n",
+      "Image 76 sur 100 chargée\n",
+      "Image 77 sur 100 chargée\n",
+      "Image 78 sur 100 chargée\n",
+      "Image 79 sur 100 chargée\n",
+      "Image 80 sur 100 chargée\n",
+      "Image 81 sur 100 chargée\n",
+      "Image 82 sur 100 chargée\n",
+      "Image 83 sur 100 chargée\n",
+      "Image 84 sur 100 chargée\n",
+      "Image 85 sur 100 chargée\n",
+      "Image 86 sur 100 chargée\n",
+      "Image 87 sur 100 chargée\n",
+      "Image 88 sur 100 chargée\n",
+      "Image 89 sur 100 chargée\n",
+      "Image 90 sur 100 chargée\n",
+      "Image 91 sur 100 chargée\n",
+      "Image 92 sur 100 chargée\n",
+      "Image 93 sur 100 chargée\n",
+      "Image 94 sur 100 chargée\n",
+      "Image 95 sur 100 chargée\n",
+      "Image 96 sur 100 chargée\n",
+      "Image 97 sur 100 chargée\n",
+      "Image 98 sur 100 chargée\n",
+      "Image 99 sur 100 chargée\n",
+      "Image 100 sur 100 chargée\n",
+      "\n",
+      "TRAITEMENT ACTEUR N°14\n",
+      "\n",
+      "Traitement de 01-01-03-02-02-02-01.mp4, video 1/2\n",
+      "Lecture vidéo de 01-01-03-02-02-02-01.mp4\n",
+      "Donnée ajoutée, Images: 1 Labels: 1\n"
+     ]
+    }
+   ],
+   "source": [
+    "#@title Imports\n",
+    "#%load_ext autoreload  #Need to uncomment for import, dont understand\n",
+    "\n",
+    "import tensorflow as tf\n",
+    "from tensorflow import keras\n",
+    "from tensorflow.keras import datasets, layers, models, losses\n",
+    "import tensorflow_datasets as tfds\n",
+    "#from google.colab import files\n",
+    "\n",
+    "from matplotlib import image\n",
+    "import os\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "import matplotlib\n",
+    "import random as rd\n",
+    "import cv2\n",
+    "import csv\n",
+    "\n",
+    "from loadFer2013ds import *\n",
+    "from loadRavdessDs import *\n",
+    "from utils import *\n",
+    "\n",
+    "X, Y = loadFer2013Data(100)\n",
+    "W, Z = loadRavdessData(100)"
+   ]
+  },
+  {
+   "source": [],
+   "cell_type": "code",
+   "metadata": {},
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#@title Hyperparamètres\n",
+    "classes = [\"Angry\", \"Disgust\", \"Fear\", \"Happy\", \"Sad\", \"Suprise\", \"Neutral\"]\n",
+    "Na = len(classes)\n",
+    "maxNbrImagesForEachClasses = float('inf')\n",
+    "h = 48\n",
+    "l = 48\n",
+    "p = 1\n",
+    "input_shape = (h, l, p)\n",
+    "\n",
+    "epochs = 5\n",
+    "batch_size = 128\n",
+    "validation_size = 0.1"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ]
+}
\ No newline at end of file
diff --git a/config.py b/config.py
index be19af0f9ae99b37ac9a72d907d388e1cd787b42..7fd6a4d5521d365c4abb414768bac616b01f6733 100644
--- a/config.py
+++ b/config.py
@@ -1,8 +1,8 @@
-#Name of model used
+# Name of model used
 modelName = 'firstModel'
 
-#Emotions provided by the dataset
+# Emotions provided by the dataset
 emotions = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Suprise", "Neutral"]
 
-#Shape of input of the model
-input_shape = (48,48,1)
\ No newline at end of file
+# Shape of the model input (height, width, channels)
+input_shape = (48, 48, 1)
diff --git a/imageProcess.py b/imageProcess.py
index a773b4d5b448d284166a75b54707929f66637535..cba699ee0404a2f84d91f848d05e27a089659ea1 100644
--- a/imageProcess.py
+++ b/imageProcess.py
@@ -4,9 +4,11 @@ import numpy as np
 import faceAnalysis as fa
 import timeit as ti
 
-def imageProcess(image):
+def imageProcess(image, writeEmotion=True):
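+    # With writeEmotion=False, emotion inference is skipped (useful when
+    # building datasets); detected face crops are returned in both cases.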
     #Objectives : detect faces, identify emotion associated on it, modify the image by framing faces and writing their emotions associated
     
+    facesList = []
+
     #Import faces and eyes detectors from cv2
     face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_frontalface_default.xml')
     eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_eye.xml')
@@ -25,16 +27,19 @@ def imageProcess(image):
         #Select face image
         face_gray = gray[y:y+h, x:x+w]
         face_color = image[y:y+h, x:x+w]
-        
+        facesList.append(face_color)
+
         #Detect eyes on the face, create green rectangle
         eyes = eye_cascade.detectMultiScale(face_gray)
         for (ex,ey,ew,eh) in eyes:
             cv2.rectangle(face_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),1)
 
         #Write emotion on the image
-        emotion = fa.detectEmotion(face_color)
-        cv2.putText(image, emotion, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
+        if writeEmotion:
+            emotion = fa.detectEmotion(face_color)
+            cv2.putText(image, emotion, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
 
+    return facesList
 
 def selectFace(image):
     #Return a face identified on an colored image
@@ -45,7 +50,7 @@ def selectFace(image):
     #Face detection is made on gray images
     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
 
-    faces = face_cascade.detectMultiScale(gray, 1.3, 5) #This return a list of tuple locating faces on image
+    faces = face_cascade.detectMultiScale(gray, 1.03, 5) #This returns a list of rectangles (x, y, w, h) locating faces on the image
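+    # A scaleFactor of 1.03 steps through the image pyramid more finely than
+    # the previous 1.3, detecting more faces at the cost of speed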
     
     #The face returned is the first face detected on the image (if exists)
     if faces != []:
diff --git a/loadFer2013ds.py b/loadFer2013ds.py
index 18189926ac7470df269dd1d366c7196237191434..b10e6b8574f70fdeff4247dbf085b7700f22e966 100644
--- a/loadFer2013ds.py
+++ b/loadFer2013ds.py
@@ -1,62 +1,63 @@
-#This file load the dataset fer2013 as arrays. 
+# This file loads the fer2013 dataset as arrays.
 import csv
 import numpy as np
 import cv2
 import matplotlib.pyplot as plt
 
 
-nbrImages = 35887
-maxNbrImages = nbrImages
-emotions = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Suprise", "Neutral"]
+def strToArray(string):  # Fer2013 provides each image as a string, so it must be converted
+    A = []
+    length = len(string)
+    i = 0
+    nbr = ""
 
-def strToArray(string):  #Fer2013 provides images as string so it needs to be transformed
-	A = []
-	lenght = len(string)
-	i=0
-	nbr = ""
+    while i < length:
+        car = string[i]
 
-	while i<lenght:
-		car = string[i]
+        if car != " ":
+            nbr += car
+        else:
+            A.append(int(nbr))
+            nbr = ""
+        i += 1
+    A.append(int(nbr))
 
-		if car != " ":
-			nbr += car
-		else:
-			A.append(int(nbr))
-			nbr = ""
-		i+=1
-	A.append(int(nbr))
-	
-	A = np.array(A)
-	A = np.reshape(A, (48, 48))
+    A = np.array(A)
+    A = np.reshape(A, (48, 48))
 
-	return A
+    return A
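+# Note: strToArray is equivalent to the one-liner
+# np.array(string.split(), dtype=int).reshape(48, 48)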
 
 
+# LOAD DATA AS ARRAY
 
-#LOAD DATA AS ARRAY
-X = []
-Y = []
+def loadFer2013Data(maxNbrImages=35887):
+    # fer2013 contains 35887 images in total
+    filename = "data/fer2013.csv"
+    emotions = ["Angry", "Disgust", "Fear",
+                "Happy", "Sad", "Suprise", "Neutral"]
 
-filename = "data/fer2013.csv"
+    X = []
+    Y = []
 
-with open(filename,'r',encoding='utf-8') as file:
-	
-	csv_reader = csv.reader(file, delimiter=",")
-	next(csv_reader)  								#Passe la ligne de titre
-	
-	i=0
-	for row in csv_reader:
+    with open(filename, 'r', encoding='utf-8') as file:
 
-		i+=1
-		if i>maxNbrImages: break
-		
-		emotionNbr, stringImage, typeImage = row
-		traitement(emotionNbr, stringImage, typeImage)
+        csv_reader = csv.reader(file, delimiter=",")
+        next(csv_reader)  # Skip the header row
 
-		X.append(strToArray(stringImage))
-		Y.append(emotionNbr)
+        i = 0
+        for row in csv_reader:
+            i += 1
+            if i > maxNbrImages:
+                break
 
-		print(f"Image {i} sur {nbrImages} chargée", end='\r')
+            emotionNbr, stringImage, typeImage = row
 
-X = np.array(X)
-Y = np.array(Y)
\ No newline at end of file
+            X.append(strToArray(stringImage))
+            Y.append(int(emotionNbr))  # cast to int so labels match Ravdess's integer labels
+
+            print(f"Image {i} sur {maxNbrImages} chargée")
+
+    X = np.array(X)
+    Y = np.array(Y)
+    return X, Y
diff --git a/loadRavdessDs.py b/loadRavdessDs.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb1db154152f931f64266f6d85545d8bed7e417d
--- /dev/null
+++ b/loadRavdessDs.py
@@ -0,0 +1,120 @@
+import os
+import cv2
+import numpy as np
+from utils import *
+import imageProcess as ip
+from config import input_shape
+
+
+def extractDataFromVideo(filename, videoName, emotions, facesList, labelsList):
+    # Extract every face found in the given video and append it to facesList,
+    # with the matching label appended to labelsList. `emotions` is the Ravdess
+    # code-to-name mapping defined in loadRavdessData.
+
+    # Start capture of a video
+    print("Lecture vidéo de", videoName)
+    frameRate = 1
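+    # With frameRate = 1 every frame is processed; 0.5, for instance, would keep one frame in two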
+    cap = cv2.VideoCapture(filename)
+    if not cap.isOpened():
+        print("Error opening video stream or file")
+
+    # Read until video is completed
+    k = 0
+    while cap.isOpened():
+        # Capture frame-by-frame
+        ret, frame = cap.read()
+        if ret:
+            k += 1
+
+            if k*frameRate >= 1:  # Process a frame every N frames, where N = 1/frameRate
+                k = 0
+
+                # Load image and labels
+
+                # The file name encodes the emotion with Ravdess's numbering,
+                # which differs from fer2013 (the reference), so remap it
+                emotionNbr = int(videoName[7])
+                emotion = emotions[emotionNbr]
+                emotionNbr = emotionToNumber(emotion)
+
+                # Detect faces on the image
+                newFaces = ip.imageProcess(frame, writeEmotion=False)
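+                # imageProcess draws detection rectangles on the frame in place
+                # and returns the cropped faces without running emotion inference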
+
+                # If no face or several faces were detected, something went wrong, since these are single-person videos.
+                if len(newFaces) == 1:
+                    facesList += newFaces
+                    labelsList.append(emotionNbr)
+                    print("Donnée ajoutée, Images:", len(
+                        facesList), "Labels:", len(labelsList))
+                else:
+                    print("Erreur pour la donnée : Aucun ou plusieurs visages détectés")
+
+            # Press Q on the keyboard to exit
+            if cv2.waitKey(25) & 0xFF == ord('q'):
+                break
+
+            # Display the resulting frame
+            cv2.imshow('Frame', frame)
+
+        # Break the loop
+        else:
+            break
+
+    # When everything done, release the video capture object
+    cap.release()
+
+    # Closes all the frames
+    cv2.destroyAllWindows()
+
+    # Return the face and label lists including the new data
+    return facesList, labelsList
+
+
+# LOAD DATA
+
+def loadRavdessData(maxNbrImages=float('inf')):
+
+    foldername = "data/ravdessTest/videos/"
+    emotions = ["_", "Neutral", "Calm", "Happy",
+                "Sad", "Angry", "Fear", "Disgust", "Suprise"]
+    facesList = []
+    labelsList = []
+
+    # For each actor...
+    for actorName in os.listdir(foldername):
+
+        # If we have exceeded the desired maximum number of images, stop
+        if len(facesList) > maxNbrImages:
+            break
+
+        print(f"\nTRAITEMENT ACTEUR N°{actorName[-2:]}\n")
+        videoNames = os.listdir(foldername+actorName)
+        nbrOfVideos = len(videoNames)
+
+        k = 0
+        # For each video...
+        for videoName in videoNames:
+            k += 1
+            print(f"Traitement de {videoName}, video {k}/{nbrOfVideos}")
+            filename = foldername+actorName+'/'+videoName
+
+            # Press Q on keyboard to exit ONE video
+            if cv2.waitKey(25) & 0xFF == ord('q'):
+                break
+
+            if videoName[7] == '2':
+                # The 'Calm' emotion has no fer2013 equivalent, so skip it
+                print("Emotion 'Calme', non prise en compte")
+            else:
+                facesList, labelsList = extractDataFromVideo(
+                    filename, videoName, emotions, facesList, labelsList)
+
+    # Convert the list of N*M*3 color face images into an array of 48*48*1 grayscale images
+    N = len(facesList)
+    print(f"Traitement des {N} visages détectés sur les vidéos de Ravdess")
+
+    for k in range(N):
+        visage = facesList[k]
+        facesList[k] = normAndResize(visage, input_shape)
+    X = np.array(facesList)
+
+    Y = np.array(labelsList)
+
+    print(N, " données chargées depuis Ravdess.")
+    return X, Y
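+
+# Example: loadRavdessData(100) gathers faces actor by actor and stops before
+# starting a new actor once more than 100 faces have been collected.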
diff --git a/test.py b/test.py
index 1f8a1660a7b5194fe9c393e4a9138049cd9bafd1..d376c9b1c99854b72af2eef61111cd6f17f309ed 100644
--- a/test.py
+++ b/test.py
@@ -1 +1,23 @@
-#Use this file for test
\ No newline at end of file
+# Use this file for tests
+
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import datasets, layers, models, losses
+import tensorflow_datasets as tfds
+#from google.colab import files
+
+from matplotlib import image
+import os
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib
+import random as rd
+import cv2
+import csv
+
+from loadFer2013ds import *
+from loadRavdessDs import *
+from utils import *
+
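+# Smoke test: load 100 fer2013 images and about 100 Ravdess faces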
+X, Y = loadFer2013Data(100)
+W, Z = loadRavdessData(100)
\ No newline at end of file
diff --git a/utils.py b/utils.py
index d15c29486bab40e3238bc7318de61a3660a7ca66..0bb6258c798cdd8d66ea335ddbf68b0b9d47ea74 100644
--- a/utils.py
+++ b/utils.py
@@ -1,6 +1,7 @@
 import numpy as np
 import cv2
 import matplotlib.pyplot as plt
+from config import emotions
 
 def afficher(image):
     if len(image.shape) == 3:
@@ -29,7 +30,10 @@ def normAndResize(image, input_shape):
         image = image.mean(2)
     image = np.reshape(image, (h, l, p))  # restore third dimension
     image = image.astype("float32")
-    image = image/255  # normalisation
+    image = (image/127.5)-1  # normalize from [0, 255] to [-1, 1]
 
     return image
 
+def emotionToNumber(emotion):
+    # Map an emotion name to its index in the fer2013 label order (config.emotions)
+    return emotions.index(emotion)
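+
+# Example: emotionToNumber("Happy") == 3, its index in the fer2013 label order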