From f6416bb8526403b7d52e0bdf0a71fd12d3a0b53b Mon Sep 17 00:00:00 2001
From: Vaek <timothe.boulet@student-cs.fr>
Date: Tue, 16 Mar 2021 20:22:43 +0100
Subject: [PATCH] Add video capture

---
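Usage note: videoCapture.py grabs frames from the default camera, imageProcess.imageProcess() annotates each frame in place, and faceAnalysis.detectEmotion() is for now a stub that always returns "Happy?". For a quick smoke test of imageProcess without a webcam, a sketch along these lines could be used; it assumes OpenCV is installed (opencv-python) and that a local image file named sample.jpg exists, neither of which is part of this patch:

    # smoke_test.py (hypothetical helper, not included in this patch)
    import cv2
    import imageProcess as ip

    img = cv2.imread("sample.jpg")   # None if the file is missing
    if img is not None:
        ip.imageProcess(img)         # draws face/eye boxes and writes the emotion label
        cv2.imwrite("sample_out.jpg", img)
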
 faceAnalysis.py                         |   4 +++
 imageProcess.py                         |  34 ++++++++++++++++++++++++
 videoCapture.py                         |  19 +++++++++++++
 3 files changed, 57 insertions(+)
 create mode 100644 faceAnalysis.py
 create mode 100644 videoCapture.py

diff --git a/faceAnalysis.py b/faceAnalysis.py
new file mode 100644
index 0000000..bd4c220
--- /dev/null
+++ b/faceAnalysis.py
@@ -0,0 +1,4 @@
+# The objective of this file is to analyse a face and return its emotion
+
+def detectEmotion(face):
+    return "Happy?"  # Placeholder: real emotion detection is not implemented yet
\ No newline at end of file
diff --git a/imageProcess.py b/imageProcess.py
index e69de29..df0530f 100644
--- a/imageProcess.py
+++ b/imageProcess.py
@@ -0,0 +1,34 @@
+#File to process images
+import cv2
+import faceAnalysis as fa
+
+def imageProcess(image):
+    # Objectives: detect faces, identify the emotion on each face, and modify the image by framing each face and writing its detected emotion
+    
+    # Load the face and eye Haar cascade detectors bundled with cv2
+    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_frontalface_default.xml')
+    eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_eye.xml')
+
+    # cv2 cascade detection works on grayscale images
+    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+    faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # Returns a list of (x, y, w, h) tuples locating faces on the image
+
+    # For each face: frame it, detect eyes on it, and write the detected emotion on the image
+    for face in faces:
+        x,y,w,h = face
+        
+        # Draw a blue rectangle of thickness 2 around the face
+        cv2.rectangle(image, (x,y), (x+w,y+h), (255,0,0), 2)
+        
+        # Crop the face region (grayscale for detection, colour for drawing)
+        face_gray = gray[y:y+h, x:x+w]
+        face_color = image[y:y+h, x:x+w]
+        
+        # Detect eyes on the face and draw a green rectangle around each
+        eyes = eye_cascade.detectMultiScale(face_gray)
+        for (ex,ey,ew,eh) in eyes:
+            cv2.rectangle(face_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),1)
+
+        # Write the detected emotion above the face
+        emotion = fa.detectEmotion(face_color)
+        cv2.putText(image, emotion, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
\ No newline at end of file
diff --git a/videoCapture.py b/videoCapture.py
new file mode 100644
index 0000000..7f1882e
--- /dev/null
+++ b/videoCapture.py
@@ -0,0 +1,19 @@
+# Use your camera to process the video stream. Stop by pressing Q.
+import cv2
+import imageProcess as ip
+
+cap = cv2.VideoCapture(0)  # 0 selects the first camera, usually your webcam
+
+while cap.isOpened():
+    ret, frame = cap.read()  # Read the next video frame
+    if not ret: break  # Stop if the frame could not be read
+    
+    ip.imageProcess(frame)  # Process the frame in place (faces, eyes, emotion)
+    
+    cv2.imshow("Image traitée", frame)  			#Show processed image in a window
+
+    if cv2.waitKey(1) & 0xFF == ord('q'):  # Press Q to stop the capture loop
+        break
+
+cap.release()
+cv2.destroyAllWindows()
\ No newline at end of file
-- 
GitLab