diff --git a/imageProcess.py b/imageProcess.py
index cba699ee0404a2f84d91f848d05e27a089659ea1..6719ce170e96bffe4b3909d81fcfda046ec69b7b 100644
--- a/imageProcess.py
+++ b/imageProcess.py
@@ -29,11 +29,6 @@ def imageProcess(image, writeEmotion=True):
         face_color = image[y:y+h, x:x+w]
         facesList.append(face_color)
 
-        #Detect eyes on the face, create green rectangle
-        eyes = eye_cascade.detectMultiScale(face_gray)
-        for (ex,ey,ew,eh) in eyes:
-            cv2.rectangle(face_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),1)
-
         #Write emotion on the image
         if writeEmotion:
             emotion = fa.detectEmotion(face_color)
diff --git a/loadAffwild.py b/loadAffwild.py
new file mode 100644
index 0000000000000000000000000000000000000000..c50f06902252db18cc8ad998794b83b3d0dd5053
--- /dev/null
+++ b/loadAffwild.py
@@ -0,0 +1,128 @@
+import os
+import cv2
+import numpy as np
+from utils import *
+import imageProcess as ip
+from config import input_shape
+
+
+def extractDataFromVideo_(filename, videoName, facesList, labelsList, maxNbrImages):
+        # Extract every face from the given video and append it, with its corresponding label, to facesList and labelsList.
+        frameRate = 1/15
+        emotions = ["Neutral", "Angry", "Disgust", "Fear", "Happy", "Sad", "Suprise"]
+
+        # Start capture of a video and reading of a label file
+        cap = cv2.VideoCapture(filename)
+        if not cap.isOpened():
+                print("Error opening video")
+
+        file = open("data/affwild/labels/"+videoName[:-4]+'.txt', 'r')
+        file.readline()
+
+        # Read until video is completed
+        k = 0
+        while (cap.isOpened()):
+                # Capture frame-by-frame
+                ret, frame = cap.read()
+                line = file.readline()
+
+                if ret:
+                        k += 1
+
+                        if k*frameRate >= 1:  # Read a frame each N frames where N=1/frameRate
+                                k = 0
+
+                                # Load image and labels
+
+                                #Detect faces on the image
+                                newFaces = ip.imageProcess(frame, writeEmotion=False)
+
+                                # Keep the frame only if exactly one face was detected: these are single-person videos, so anything else is an error.
+                                if len(newFaces) == 1:
+                                        facesList += newFaces
+
+                                        emotionNbr = emotionToNumber(emotions[int(line[0])])
+                                        labelsList.append(emotionNbr)
+                                        print("Donnée ajoutée, donnée :", len(facesList))
+                                else: print("Erreur pour la donnée : Aucun ou plusieurs visages détectés")
+
+                                # If we overreach the maximum number of images desired, stop
+                                if len(facesList) > maxNbrImages:
+                                        break
+
+                        # Press Q on keyboard to  exit
+                        if cv2.waitKey(1) & 0xFF == ord('q'):
+                                break
+
+                        # Display the resulting frame
+                        if False:  # Display disabled by default; set to True to watch frames during extraction
+                                cv2.imshow('AffWild data extraction...', frame)
+
+                # Break the loop
+                else:
+                        break
+
+        # When everything done, release the video capture object
+        cap.release()
+
+        # Closes all the frames
+        cv2.destroyAllWindows()
+
+        #Close file
+        file.close()
+
+        # Return the face and label lists, including the new data
+        return facesList, labelsList
+
+
+
+
+# LOAD DATA
+
+def loadAffwildData(maxNbrImages=10000000000):
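+        # Walks data/affwild/videos, skips two-face videos and returns (X, Y): faces resized to input_shape and their emotion labels.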
+        print(f"\nCHARGEMENT DE {maxNbrImages} DONNEES DEPUIS AFFWILD...")
+
+        foldername = "data/affwild/videos/"
+        facesList = []
+        labelsList = []
+        k = 0
+        nbrOfVideos = len(os.listdir(foldername))
+
+        # For each video...
+        for videoName in os.listdir(foldername):
+
+                # If we overreach the maximum number of images desired, stop
+                if len(facesList) >= maxNbrImages:
+                        break
+
+                elif videoName[:-4]+'_left.txt' in os.listdir("data/affwild/labels") or videoName[:-4]+'_right.txt' in os.listdir("data/affwild/labels"):
+                        print("Vidéo à deux visages, non prise en compte")
+
+                else:
+                        k += 1
+                        print(f"Traitement de {videoName}, video {k}/{nbrOfVideos}")
+                        filename = foldername+videoName
+
+                        # Press Q on keyboard to exit ONE video
+                        if cv2.waitKey(1) & 0xFF == ord('q'):
+                                break
+
+                        # Add the data extracted from this video to the features and labels lists
+                        facesList, labelsList = extractDataFromVideo_(
+                                filename, videoName, facesList, labelsList, maxNbrImages)
+
+        # List of colored images N*M*3 faces to array of gray images 48*48*1
+        N = len(facesList)
+        print(f"TRAITEMENT AFFWILD: traitement des {N} visages détectés sur les vidéos de AffWild...")
+
+        for k in range(N):
+                visage = facesList[k]
+                facesList[k] = normAndResize(visage, input_shape)
+        X = np.array(facesList)
+
+        Y = np.array(labelsList)
+
+        print(N, "données chargées depuis AffWild.")
+        return X, Y
diff --git a/loadExpWDS.py b/loadExpWDS.py
new file mode 100644
index 0000000000000000000000000000000000000000..f146b528de4268c6c472e49fac9b3f63dbf9dfa1
--- /dev/null
+++ b/loadExpWDS.py
@@ -0,0 +1,60 @@
+import cv2
+from utils import *
+from config import input_shape
+import imageProcess as ip
+import numpy as np
+
+
+def loadExpWData(nbrMaxImages=float('inf'), onlyDetected=False):
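+	# onlyDetected=True keeps only samples where the cascade detector finds exactly one face inside the dataset's bounding box.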
+	print(f"\nCHARGEMENT DE {nbrMaxImages} DONNEES DEPUIS EXPW...")
+	folderImages = 'data/expW/images/'
+	fileLabels = 'data/expW/labels.lst'
+
+	file = open(fileLabels, 'r')
+	nbrImages = 0
+	k = 0
+	X = []
+	Y = []
+
+	for line in file:
+		if nbrImages>=nbrMaxImages: break
+		k+= 1
+		
+		#Face extraction, according to the dataset annotations AND the face detector (cascade)
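+		# Each labels.lst line holds: image name, face id, bounding box (top, left, right, bottom), detector confidence and emotion label.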
+		imageName, Id, top, left, right, bottom, confidence, label = line.strip().split(' ')
+		image = cv2.imread(folderImages+imageName)
+		faceAccordingToDS = image[int(top):int(bottom), int(left):int(right)]     
+		facesDetected = ip.imageProcess(faceAccordingToDS, writeEmotion=False)
+		
+		# Optional visual preview (disabled below; a bit unsettling without waiting 1000 ms between images...)
+		if False:
+			cv2.imshow("ExpW importation...", faceAccordingToDS)
+			if cv2.waitKey(1000) & 0xFF == ord('q'):
+				break
+
+		#Add extracted data to our dataset
+		if len(facesDetected) == 1 or not onlyDetected:  # Otherwise no face was detected, or a non-face was wrongly detected as one
+			
+			#Select in priority image detected by detector
+			if len(facesDetected) != 0:
+				face = facesDetected[0]
+			else:
+				face = faceAccordingToDS
+
+			#Colored N*M*3 face to gray 48*48*1 image.
+			gray = normAndResize(face, input_shape)	
+
+			X.append(gray)
+			Y.append(int(label))  # Emotion order is the same as fer2013.
+			
+			nbrImages += 1
+		else: print("Erreur pour la donnée : Aucun ou plusieurs visages détectés")
+
+	print(f"{nbrImages} données chargées depuis expW (sur {k} données traités).\n")
+
+	X = np.array(X)
+	Y = np.array(Y)
+
+	return X, Y
diff --git a/loadFer2013ds.py b/loadFer2013ds.py
index b10e6b8574f70fdeff4247dbf085b7700f22e966..434ebc910b26a6f537f844399b3edd7e08ecaa39 100644
--- a/loadFer2013ds.py
+++ b/loadFer2013ds.py
@@ -31,7 +31,8 @@ def strToArray(string):  # Fer2013 provides images as string so it needs to be t
 # LOAD DATA AS ARRAY
 
 def loadFer2013Data(maxNbrImages=35887):
-    c = 0
+    print(f"\nCHARGEMENT DE {maxNbrImages} DONNEES DEPUIS FER2013 ...")
+
     nbrImagesFer2013 = 35887
     filename = "data/fer2013.csv"
     emotions = ["Angry", "Disgust", "Fear",
@@ -56,8 +57,9 @@ def loadFer2013Data(maxNbrImages=35887):
             X.append(strToArray(stringImage))
             Y.append(emotionNbr)
 
-            print(f"Image {i} sur {maxNbrImages} chargée")
+            print(f"Donnée {i} sur {maxNbrImages} chargée", end='\r')
 
     X = np.array(X)
     Y = np.array(Y)
+    print(f"{maxNbrImages} données chargées depuis fer2013.")
     return X, Y
diff --git a/loadRavdessDs.py b/loadRavdessDs.py
index cb1db154152f931f64266f6d85545d8bed7e417d..f170ce25147b70e2d4d117e0693abf07699ae551 100644
--- a/loadRavdessDs.py
+++ b/loadRavdessDs.py
@@ -5,11 +5,10 @@ import imageProcess as ip
 from config import input_shape
 
 
-def extractDataFromVideo(filename, videoName, facesList, labelsList):
+def extractDataFromVideo(filename, videoName, facesList, labelsList, maxNbrImages):
     # Extract every faces in a specified video and add it to a list of faces as well as labels corresponding.
 
     # Start capture of a video
-    print("Lecture vidéo de", videoName)
     frameRate = 1
     cap = cv2.VideoCapture(filename)
     if (cap.isOpened() == False):
@@ -40,12 +39,14 @@ def extractDataFromVideo(filename, videoName, facesList, labelsList):
                 if len(newFaces) == 1:
                     facesList += newFaces
                     labelsList.append(emotionNbr)
-                    print("Donnée ajoutée, Images:", len(
-                        facesList), "Labels:", len(labelsList))
-                else: print("Erreur pour la donnée : Aucun ou plusieurs visages détectés")
+                elif True: print("Erreur pour la donnée : Aucun ou plusieurs visages détectés")
+
+                # If we overreach the maximum number of images desired, stop
+                if len(facesList) > maxNbrImages:
+                    break
 
             # Press Q on keyboard to  exit
-            if cv2.waitKey(25) & 0xFF == ord('q'):
+            if cv2.waitKey(1) & 0xFF == ord('q'):
                 break
 
             # Display the resulting frame
@@ -68,7 +69,8 @@ def extractDataFromVideo(filename, videoName, facesList, labelsList):
 
 # LOAD DATA
 
-def loadRavdessData(maxNbrImages=float('inf')):
+def loadRavdessData(maxNbrImages=10000000000):
+    print(f"\nCHARGEMENT DE {maxNbrImages} DONNEES DEPUIS RAVDESS...")
 
     foldername = "data/ravdessTest/videos/"
     emotions = ["_", "Neutral", "Calm", "Happy",
@@ -79,35 +81,36 @@ def loadRavdessData(maxNbrImages=float('inf')):
     # For each actor...
     for actorName in os.listdir(foldername):
 
-        # If we overreach the maximum number of images desired, stop
-        if len(facesList) > maxNbrImages:
-            break
-
-        print(f"\nTRAITEMENT ACTEUR N°{actorName[-2:]}\n")
+        print(f"TRAITEMENT ACTEUR N°{actorName[-2:]}")
         videoNames = os.listdir(foldername+actorName)
         nbrOfVideos = len(videoNames)
 
         k = 0
         # For each video...
         for videoName in videoNames:
+            # If we overreach the maximum number of images desired, stop
+            if len(facesList) >= maxNbrImages:
+                break
+
             k += 1
             print(f"Traitement de {videoName}, video {k}/{nbrOfVideos}")
             filename = foldername+actorName+'/'+videoName
 
             # Press Q on keyboard to exit ONE video
-            if cv2.waitKey(25) & 0xFF == ord('q'):
+            if cv2.waitKey(1) & 0xFF == ord('q'):
                 break
 
             if videoName[7] == '2':
                 # Doesnt take Calm emotion into account
                 print("Emotion 'Calme', non prise en compte")
             else:
+                # Add the data extracted from this video to the features and labels lists
                 facesList, labelsList = extractDataFromVideo(
-                    filename, videoName, facesList, labelsList)
+                    filename, videoName, facesList, labelsList, maxNbrImages)
 
     # List of colored images N*M*3 faces to array of gray images 48*48*1
     N = len(facesList)
-    print(f"Traitement des {N} visages détectés sur les vidéos de Ravdess")
+    print(f"TRAITEMENT RAVDESS: traitement des {N} visages détectés sur les vidéos de Ravdess...")
 
     for k in range(N):
         visage = facesList[k]
@@ -116,5 +119,5 @@ def loadRavdessData(maxNbrImages=float('inf')):
 
     Y = np.array(labelsList)
 
-    print(N, " données chargées depuis Ravdess.")
+    print(N, "données chargées depuis Ravdess.")
     return X, Y
diff --git a/test.py b/test.py
index d376c9b1c99854b72af2eef61111cd6f17f309ed..afd0b49d75e49b3ad6f39385bb30c776fcbf8475 100644
--- a/test.py
+++ b/test.py
@@ -15,9 +15,14 @@ import random as rd
 import cv2
 import csv
 
-from loadFer2013ds import *
-from loadRavdessDs import *
+from loadFer2013ds import *
+from loadRavdessDs import *
+from loadExpWDS import *
+from loadAffwild import *
 from utils import *
 
-X, Y = loadFer2013Data(100)
-W, Z = loadRavdessData(100)
\ No newline at end of file
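+# Quick check of the data loaders; uncomment the other lines to test the remaining datasets.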
+# X, Y = loadFer2013Data(10)
+# W, Z = loadRavdessData(10)
+# A, B = loadExpWData(10)
+C, D = loadAffwildData(1000)
\ No newline at end of file