diff --git a/__pycache__/faceAnalysis.cpython-39.pyc b/__pycache__/faceAnalysis.cpython-39.pyc
index 9da9aaba520f5915277c7b1f95b1d04246a2ea10..368b1bcddbeb049eab5511f2cb30b5052084d6ac 100644
Binary files a/__pycache__/faceAnalysis.cpython-39.pyc and b/__pycache__/faceAnalysis.cpython-39.pyc differ
diff --git a/__pycache__/game.cpython-39.pyc b/__pycache__/game.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c404eea4190b7533106a8631dbc60cf05a1a5d5b
Binary files /dev/null and b/__pycache__/game.cpython-39.pyc differ
diff --git a/__pycache__/utils.cpython-39.pyc b/__pycache__/utils.cpython-39.pyc
index e89e1b3c25003428354d2326b863319855205de8..a5d9d659083885aff989b884eaf6c8e88c6cd46f 100644
Binary files a/__pycache__/utils.cpython-39.pyc and b/__pycache__/utils.cpython-39.pyc differ
diff --git a/__pycache__/videoCapture.cpython-39.pyc b/__pycache__/videoCapture.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b4efb37fafe3a967bb28295d3809bcd65d02650
Binary files /dev/null and b/__pycache__/videoCapture.cpython-39.pyc differ
diff --git a/faceAnalysis.py b/faceAnalysis.py
index 22660aa7a7c2ed28b20571b16966dec92e7ad3da..845f556e6f685b937d0f66b5e4d0b63c54894931 100644
--- a/faceAnalysis.py
+++ b/faceAnalysis.py
@@ -1,7 +1,6 @@
 #Objective of this file is to analyse a face
-import keras
+print("Chargement du modèle...")
 import numpy as np
-import cv2
 from utils import *
 from config import emotions, input_shape, modelName

diff --git a/game.py b/game.py
index 9ae95912cc87e57466ff45c1074d2c4ea46613c1..02491389f5ca6a7c3b35a09538ec52cc1e96bd12 100644
--- a/game.py
+++ b/game.py
@@ -1,15 +1,10 @@
-#Use your camera for processing the video. Stop by pressing Q
-import cv2
-import imageProcess as ip
-import faceAnalysis as fa
-import random
-from config import emotions
+def smileyRandom(emotionToDodge):
+    #Return a random smiley and the associated emotion
 
-cap = cv2.VideoCapture(0) #0 means we capture the first camera, your webcam probably
-score = 0
-N = 15
+    import cv2
+    import random
+    from config import emotions
 
-def smileyRandom(emotionToDodge):
     emotionNbr = random.randrange(0,6)
     emotion = emotions[emotionNbr]
     if emotion == emotionToDodge: return smileyRandom(emotion)
@@ -17,32 +12,95 @@ def smileyRandom(emotionToDodge):
     smiley = cv2.imread(smileyImagePath)
     return smiley, emotion
 
-smiley, emotion = smileyRandom("")
-while cap.isOpened(): #or while 1. cap.isOpened() is false if there is a problem
-    ret, frame = cap.read() #Read next video frame, stop if frame not well read
-    if not ret: break
-
-    emotionsList = ip.imageProcess(frame, returnEmotion=True)
-
-    if emotion in emotionsList: #If emotion recognized, increase score, reset smiley to mimick and write "GG!"
-        score += 1
-        cv2.putText(smiley, "Emotion reconnue !", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2)
-        cv2.imshow("Smiley", smiley)
-        smiley, emotion = smileyRandom(emotion)
+def game(playTime=30, invincibleFrame=0.5, dt_required=0.5, n_photos=None):
+    #Play a game for playTime seconds.
+
+    #Use your camera for processing the video. Stop by pressing Q
+    import cv2
+    import matplotlib.pyplot as plt
+    import imageProcess as ip
+    import time
+
+    cap = cv2.VideoCapture(0) #0 means we capture the first camera, your webcam probably
+    score = 0
+
+    timeScoring = time.time() #last instant an emotion was found
+    timeInitial = time.time()
+    timeSinceOtherEmotions = time.time()
+    timeLastPhoto = time.time()
+
+    smiley, emotion = smileyRandom("")
+    smileyNeutral = smiley.copy()
+    photos = []
+
+
+    while cap.isOpened(): #or while 1. cap.isOpened() is false if there is a problem
+        ret, frame = cap.read() #Read next video frame, stop if frame not well read
+        if not ret: break
+
+        emotionsList = ip.imageProcess(frame, returnEmotion=True)
+
+        if time.time()-timeSinceOtherEmotions > dt_required: #If the emotion has been held for dt_required seconds, increase the score and pick a new smiley
+            score += 1
+            smiley, emotion = smileyRandom(emotion)
+            smileyNeutral = smiley.copy()
+            timeScoring = time.time()
+            timeSinceOtherEmotions = time.time()
+
+        elif emotion in emotionsList and time.time()-timeScoring>invincibleFrame: #If the emotion is recognized outside the invincibility window, keep the hold timer running
+            pass
+
+        else:
+            timeSinceOtherEmotions = time.time()
+
+        #Update and show the images
+        smiley = smileyNeutral.copy()
+        cv2.imshow("Caméra", frame) #Show you making emotional faces
+        cv2.putText(smiley, "Score: "+str(score), (40,40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
+        cv2.putText(smiley, "Timer: "+str(time.time()-timeInitial), (20,240), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
+        cv2.imshow("Smiley", smiley) #Show the smiley to mimic
+
+        #Temporarily save a photo:
+        if n_photos is not None:
+            if time.time()-timeLastPhoto > playTime/(n_photos+1):
+                timeLastPhoto = time.time()
+                photos.append(frame)
 
-    cv2.imshow("Caméra", frame) #Show you making emotional faces
-    cv2.putText(smiley, "Score: "+str(score), (40,40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
-    cv2.imshow("Smiley", smiley) #Show the smiley to mimic
+        #Stop the game if Q is pressed or the play time is exceeded.
+        if cv2.waitKey(1) & 0xFF == ord('q'): #If you press Q, stop the while and so the capture
+            break
 
-    if cv2.waitKey(1) & 0xFF == ord('q'): #If you press Q, stop the while and so the capture
-        break
+        elif cv2.waitKey(1) & 0xFF == ord('p'): #If you press P, pass the smiley but lower your score
+            score -= 1
+            smiley, emotion = smileyRandom(emotion)
+            smileyNeutral = smiley.copy()
+            timeScoring = time.time()
+            timeSinceOtherEmotions = time.time()
+
 
-    elif cv2.waitKey(1) & 0xFF == ord('p'): #If you press P, pass the smiley but lower your score
-        score -= 1
-        smiley, emotion = smileyRandom(emotion)
+        elif time.time() - timeInitial > playTime:
+            break
 
+    cap.release()
+    cv2.destroyAllWindows()
 
-cap.release()
-cv2.destroyAllWindows()
\ No newline at end of file
+    print(f"Jeu terminé ! Vous avez imité {score} emotions en {playTime} secondes !")
+    if n_photos is not None:
+        print("Voici quelques photos prises lors de votre performance =)")
+        for photo in photos:
+            plt.imshow(photo)
+            plt.xticks([])
+            plt.yticks([])
+            plt.show()
\ No newline at end of file
diff --git a/main.py b/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ca55580fd6415bbabc9e438b2085bd79a40d59e
--- /dev/null
+++ b/main.py
@@ -0,0 +1,6 @@
+from game import *
+from videoCapture import *
+
+game(playTime=300, invincibleFrame=5, dt_required=0.5, n_photos=5)
+
+#videoCapture()
\ No newline at end of file
diff --git a/videoCapture.py b/videoCapture.py
index f2b8ce9a9dd19ee55efc62e6f13f2822ec01036d..6c27ee3f87054f0368c66472c397c2f8750f1d36 100644
--- a/videoCapture.py
+++ b/videoCapture.py
@@ -1,19 +1,23 @@
-#Use your camera for processing the video. Stop by pressing Q
-import cv2
-import imageProcess as ip
-cap = cv2.VideoCapture(0) #0 means we capture the first camera, your webcam probably
-while cap.isOpened(): #or while 1. cap.isOpened() is false if there is a problem
-    ret, frame = cap.read() #Read next video frame, stop if frame not well read
-    if not ret: break
+def videoCapture():
 
-    ip.imageProcess(frame) #Process frame
+    #Use your camera for processing the video. Stop by pressing Q
+    import cv2
+    import imageProcess as ip
 
-    cv2.imshow("Image traitée", frame) #Show processed image in a window
+    cap = cv2.VideoCapture(0) #0 means we capture the first camera, your webcam probably
 
-    if cv2.waitKey(1) & 0xFF == ord('q'): #If you press Q, stop the while and so the capture
-        break
+    while cap.isOpened(): #or while 1. cap.isOpened() is false if there is a problem
+        ret, frame = cap.read() #Read next video frame, stop if frame not well read
+        if not ret: break
 
-cap.release()
-cv2.destroyAllWindows()
\ No newline at end of file
+        ip.imageProcess(frame) #Process frame
+
+        cv2.imshow("Image traitée", frame) #Show processed image in a window
+
+        if cv2.waitKey(1) & 0xFF == ord('q'): #If you press Q, stop the while and so the capture
+            break
+
+    cap.release()
+    cv2.destroyAllWindows()
\ No newline at end of file