diff --git a/faceAnalysis.py b/faceAnalysis.py
index bbea9d365fb80688a51b87f8840608fcfdfe2a5c..22660aa7a7c2ed28b20571b16966dec92e7ad3da 100644
--- a/faceAnalysis.py
+++ b/faceAnalysis.py
@@ -5,7 +5,9 @@ import cv2
 from utils import *
 from config import emotions, input_shape, modelName
 
-model = keras.models.load_model("models/"+modelName)	#Load our model
+#model = tf.keras.models.load_model("models/"+modelName)	#Load our model
+model = tf.saved_model.load("models/"+modelName)	#Load the model exported in TensorFlow SavedModel format
+
 print('Model used:', modelName)
 
 def detectEmotion(face):
@@ -16,4 +18,4 @@ def detectEmotion(face):
 	emotionVect = predir(model, face)
 	emotionNbr = np.argmax(emotionVect)			 
 	emotion = emotions[emotionNbr]
-	return emotion
\ No newline at end of file
+	return emotion
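
Note on the faceAnalysis.py change above: tf.keras.models.load_model returns a keras.Model that exposes .predict(), whereas tf.saved_model.load returns a restored object without the Keras API, which in this project is called directly (see the matching change to predir in utils.py below). A minimal sketch of the difference, assuming import tensorflow as tf is present at the top of faceAnalysis.py (not visible in this hunk) and that config.input_shape is the model's input shape:

    import numpy as np
    import tensorflow as tf
    from config import modelName, input_shape

    face = np.zeros(input_shape, dtype=np.float32)   # dummy input, only to illustrate the call

    # Keras loader: the returned object is a keras.Model and has .predict().
    # m = tf.keras.models.load_model("models/" + modelName)
    # probs = m.predict(np.array([face]))[0, :]

    # SavedModel loader: the restored object has no .predict(); it is called directly.
    m = tf.saved_model.load("models/" + modelName)
    probs = m(np.array([face]))[0, :]
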
diff --git a/game.py b/game.py
index 9ae95912cc87e57466ff45c1074d2c4ea46613c1..771db3e1db0bad5deeedc521488d6972be829550 100644
--- a/game.py
+++ b/game.py
@@ -3,9 +3,10 @@ import cv2
 import imageProcess as ip
 import faceAnalysis as fa
 import random
+from time import sleep
 from config import emotions
 
-cap = cv2.VideoCapture(0)   #0 means we capture the first camera, your webcam probably
+cap = cv2.VideoCapture(4)   #Camera index 4 on this machine; 0 is usually the default webcam
 score = 0
 N = 15
 
@@ -32,9 +33,12 @@ while cap.isOpened():		 #or while 1. cap.isOpened() is false if there is a probl
         smiley, emotion = smileyRandom(emotion)
 
 
-    cv2.imshow("Caméra", frame)  			#Show you making emotional faces
+    cv2.imshow("Camera", frame)  			#Show you making emotional faces
     cv2.putText(smiley, "Score: "+str(score), (40,40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
     cv2.imshow("Smiley", smiley)            #Show the smiley to mimic
+    #sleep(0.5)
+    
+   
 
     if cv2.waitKey(1) & 0xFF == ord('q'):			#If you press Q, stop the while and so the capture
         break   
@@ -45,4 +49,4 @@ while cap.isOpened():		 #or while 1. cap.isOpened() is false if there is a probl
 
 
 cap.release()
-cv2.destroyAllWindows()
\ No newline at end of file
+cv2.destroyAllWindows()
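
The game.py change above hard-codes camera index 4, which is specific to one machine; on most setups the default webcam is index 0, as the original comment said. A sketch of probing for a working index instead (the helper name find_camera is illustrative, not part of the patch):

    import cv2

    def find_camera(max_index=10):
        # Try indices 0..max_index-1 and return the first device that opens and yields a frame.
        for idx in range(max_index):
            cap = cv2.VideoCapture(idx)
            ok = cap.isOpened() and cap.read()[0]
            cap.release()
            if ok:
                return idx
        return None

    # index = find_camera()
    # cap = cv2.VideoCapture(index if index is not None else 0)
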
diff --git a/utils.py b/utils.py
index 1df51ee384a9e31f88ed2d1211f5f0f48fa0e5ab..fb5b0eb580d2772288400b480ccc61d75a56a731 100644
--- a/utils.py
+++ b/utils.py
@@ -18,8 +18,8 @@ def afficher(image):
 
 def predir(modele, image):
     # Return output of image from modele
-    return modele.predict(np.array([image]))[0, :]
-
+    #return modele.predict(np.array([image]))[0, :]
+    return modele(np.array([image]))[0, :]    # SavedModel objects are called directly, not via .predict()
 
 def normAndResize(image, input_shape):
     # For an array image of shape (a,b,c) or (a,b), transform it into (h,l,p). Also normalize it.
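
The new predir relies on the restored SavedModel being directly callable. Its __call__ is a traced tf.function, so the input dtype has to match what the function was traced with (typically float32); np.array([image]) built from a float64 image may not match any concrete function. A possible hardening of predir, offered only as a sketch under that assumption:

    import numpy as np

    def predir(modele, image):
        # Force float32 to match the traced input signature, then convert the
        # returned tensor back to a plain numpy vector for np.argmax downstream.
        batch = np.asarray([image], dtype=np.float32)
        return np.asarray(modele(batch))[0, :]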