diff --git a/__pycache__/faceAnalysis.cpython-38.pyc b/__pycache__/faceAnalysis.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d14458dbb8cdd0914e8ef56715e13b2c9c5f2ed7
Binary files /dev/null and b/__pycache__/faceAnalysis.cpython-38.pyc differ
diff --git a/__pycache__/imageProcess.cpython-38.pyc b/__pycache__/imageProcess.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3c58a46fd655f30741c583ce45752d298988015d
Binary files /dev/null and b/__pycache__/imageProcess.cpython-38.pyc differ
diff --git a/faceAnalysis.py b/faceAnalysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd4c220055fa7a11efa0020497680c699f404b80
--- /dev/null
+++ b/faceAnalysis.py
@@ -0,0 +1,4 @@
+#Objective of this file is to analyse a face
+
def detectEmotion(face):
    """Return the emotion detected on *face*.

    Placeholder implementation: always answers the constant string
    "Happy?" until a real classifier is plugged in. The *face* argument
    (a face sub-image) is currently ignored.
    """
    return "Happy?"
\ No newline at end of file
diff --git a/imageProcess.py b/imageProcess.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..df0530fc2948904b77d7c8cefe866d643caeb925 100644
--- a/imageProcess.py
+++ b/imageProcess.py
@@ -0,0 +1,34 @@
+#File to process images
+import cv2
+import faceAnalysis as fa
+
def imageProcess(image):
    """Detect faces on *image*, frame them, frame their eyes, and write the
    emotion associated with each face above it.

    Parameters
    ----------
    image : BGR image (as produced by cv2). Modified in place.

    Returns
    -------
    The same image object, annotated (returned for call-chaining; callers
    that ignore the return value are unaffected).
    """
    # Build the Haar cascade classifiers only once and cache them on the
    # function object: constructing a CascadeClassifier parses an XML file,
    # far too expensive to repeat for every video frame.
    if not hasattr(imageProcess, "_cascades"):
        imageProcess._cascades = (
            cv2.CascadeClassifier(
                cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'),
            cv2.CascadeClassifier(
                cv2.data.haarcascades + 'haarcascade_eye.xml'),
        )
    face_cascade, eye_cascade = imageProcess._cascades

    # Haar cascade detection works on grayscale input.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Returns a list of (x, y, w, h) boxes; 1.3 is the scale factor,
    # 5 the minNeighbors threshold (same values as before).
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    # For each detected face: frame it, frame its eyes, label its emotion.
    for (x, y, w, h) in faces:
        # Blue rectangle (BGR color order) of thickness 2 around the face.
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)

        # Sub-images restricted to the face region. These are numpy views
        # into `gray`/`image`, so drawing on face_color updates image too.
        face_gray = gray[y:y + h, x:x + w]
        face_color = image[y:y + h, x:x + w]

        # Green rectangle of thickness 1 around each eye found in the face.
        for (ex, ey, ew, eh) in eye_cascade.detectMultiScale(face_gray):
            cv2.rectangle(face_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 1)

        # Write the detected emotion at the face's top-left corner.
        emotion = fa.detectEmotion(face_color)
        cv2.putText(image, emotion, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)

    return image
\ No newline at end of file
diff --git a/videoCapture.py b/videoCapture.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f1882ec0ada39d3ef6c9b02688d96b0274e38a9
--- /dev/null
+++ b/videoCapture.py
@@ -0,0 +1,19 @@
+#Use your camera for processing the video. Stop by pressing Q
+import cv2
+import imageProcess as ip
+
def main():
    """Capture video from the first camera, process each frame, and display
    the result until the Q key is pressed (or the stream ends)."""
    cap = cv2.VideoCapture(0)  # 0 selects the first camera, usually the webcam
    try:
        while cap.isOpened():
            # Read the next frame; ret is False when no frame could be read.
            ret, frame = cap.read()
            if not ret:
                break

            ip.imageProcess(frame)  # Annotate the frame in place

            cv2.imshow("Image traitée", frame)  # Show the processed frame

            # waitKey(1) pumps the GUI event loop; stop when Q is pressed.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Release the camera and close the window even if processing raised,
        # so the device is never left locked.
        cap.release()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
\ No newline at end of file