From 1665a5a3bf344371a97dbd4efa60d7762f005799 Mon Sep 17 00:00:00 2001
From: Vaek <timothe.boulet@student-cs.fr>
Date: Tue, 27 Apr 2021 21:41:40 +0200
Subject: [PATCH] add loadRavdessDs dataset loader

---
 __pycache__/config.cpython-38.pyc        | Bin 285 -> 289 bytes
 __pycache__/faceAnalysis.cpython-38.pyc  | Bin 630 -> 630 bytes
 __pycache__/imageProcess.cpython-38.pyc  | Bin 1336 -> 1405 bytes
 __pycache__/loadFer2013ds.cpython-38.pyc | Bin 0 -> 1291 bytes
 __pycache__/loadRavdessDs.cpython-38.pyc | Bin 0 -> 2241 bytes
 __pycache__/utils.cpython-38.pyc         | Bin 1037 -> 1287 bytes
 buildEmotionModel.ipynb                  | 212 +++++++++++++++++++++++
 config.py                                |   8 +-
 imageProcess.py                          |  15 +-
 loadFer2013ds.py                         |  87 +++++-----
 loadRavdessDs.py                         | 120 +++++++++++++
 test.py                                  |  24 ++-
 utils.py                                 |   6 +-
 13 files changed, 418 insertions(+), 54 deletions(-)
 create mode 100644 __pycache__/loadFer2013ds.cpython-38.pyc
 create mode 100644 __pycache__/loadRavdessDs.cpython-38.pyc
 create mode 100644 buildEmotionModel.ipynb
 create mode 100644 loadRavdessDs.py

diff --git a/__pycache__/config.cpython-38.pyc b/__pycache__/config.cpython-38.pyc
index 2511b0a724b8a00cd71c25ba1527a5e5b063c1fa..471c95965035cdce13edb474b59abfc461665a7b 100644
GIT binary patch
delta 137
zcmbQsw2+B6l$V!_fq{X6!@VQn;Y40X&s$t+nMK7VzWFJsIk#9H^U{kdZ?U^%7N?gM
zm)v4;OHC}g#p;n*P*8b`IXE%p7JG1MK~ZLL>MeG^)Y6io#2iiLmj(<B3`Lw1ryI!e
mfmk5omxX?AUWtCPerR!OQL%nWW^R6o{=|pUjQSJ*DgXdsST3Oe

delta 157
zcmZ3;G?$4tl$V!_fq{XcGATXb)<j-M&nT|6%%b8F-~5!+oG4buy!4{VD0Y|3;`Gwu
zk|-9p)Wo7FR*%Gjg32i7;KY<D_TbWjqRisdD0aWp(vqUY98Knz1`G@gMT`@t8<??y
zSRmq;iL+HqXmM&$aZE{OZhlEjT4Hi$VvcTVML|(&adBpTo^DENNosOQW`17GWJX2>
E0QDO(WB>pF

diff --git a/__pycache__/faceAnalysis.cpython-38.pyc b/__pycache__/faceAnalysis.cpython-38.pyc
index 9596825d0e8f78890bb75cb2ab44f7d1cd332dbf..c2abc466fece5a4f3e797496771b33cc71240755 100644
GIT binary patch
delta 20
bcmeyy@{NT%l$V!_fq{Xcx1@F>cQz9MIco(V

delta 20
bcmeyy@{NT%l$V!_fq{X++ctY6cQz9MHTeXU

diff --git a/__pycache__/imageProcess.cpython-38.pyc b/__pycache__/imageProcess.cpython-38.pyc
index e472b46d427044482fe50ccf851170e7656a01a2..421798d2bd14fb0373f5881855f092994202292c 100644
GIT binary patch
delta 768
zcmdnN^_NRKl$V!_fq{WxB}+#_3nv4^V-N=!GczzSI503U6q`-dR;g!CVNPLbVT@u=
zVNGFcVTj^LVGL%_<ai0v;TMw31fro>gn@wpWVAC#vlb%*Lpnn(QwhTY#uA1srYz<Z
zMsbFPjJ3>2B1~WrmJ)^qEHw-z3|XuTnQB>Um>00su!5{z$Xr{`R>S7P5PPSVy@ox8
zrIw?XvzDutyM$o@dksSkM-68hQw>KAmpDTWXAQSFLmCsvl?z!J8ERN-I3Z%KAQfOW
ztxRc5k_@#xH6Ybs^%!!zHB1XQYIqkiGBVV#EMTkQNn6NR%U8qa!Vs$$!&1v%D^M#~
zD+IQlw}!t)AdRVpzeZ4;p+=xa2y8uj3J1vg!c#SjMII%b3%Edr)e4s|Ea0wTs1b%Z
zxr8B$Cxrv-RCY#&LXR4z$@3WP8Pg}<W7HO4VPIe=Vq{=ocnKmjc_&LUrLeOl78IoB
zrA)42GFBA1#hRI$n4Wrzr@Sb$B-J%Hza%q1?-plTVsdJ+PiAq+<n2t+lDC*rD{nES
zmPawAX53;=g(zCdP^2_jn>k2J1r%k>EX+cTLd<-Oe9Rn-Jj_bWGK?~eT+CdI0*orm
zER0-?a!f@kljk!}u1~KPS?B(+>Vtg|C~<&Yr^#N#4st06h~NYfpr|k62C;ZR1TO;v
zgC=K@Fo-P%BE&(21c;CV5z-(+21Lk$2uTp31Sgak7#MDG6{qH;CYQLeBqpcoOy0;M
z&aDbElrcn;vxt521r|m1D7KQ!+|<mHD5jFkB9NzwKq`t5=73qslO<T?n7C8{H#ne2

delta 707
zcmey%wS!ALl$V!_fq{X6IW;|jg_D8dF^GeVnHU%t92giFicKbJtJE{4Ft;#7v8S-4
zu(mKnailN?Gib8C1gZB+W&}}CEX2UT05aDZq*H>CfuV$90b>b67E=~;3Zpo~LdIG~
zBoQXC2vZ5e0+t$v5{4|+g-o@~HH-_`YM4`)Q&<)<*Rs^GxG=<?tFL9PVU1y_WvgYc
z<*4N>VOYRk!%)Lk!=A=e!&bu~&QQZ%!zs>?#+1UE!nTl=k)ejUh8-f-3Q_@9)5?^_
zB*{?ARRdBDR*xaaUBk41t%hqMV=Ye&j|)SrUJOeuZ!KSKAb+g@*i!Bq-Wt9%rW)QF
zesP8xz8V3rrR*sjAWIAPlyEHItl?hBSSwh<umEI{AjFX+3|ZVM9AL+>Gcpvqq%ch0
z%xKT}eDY^TZ2?vW28JR~Aie|<nmm*Bm{KOsVlv_oy2YBAo0y(D`7KklWE4|sWfW6t
zc@$G>MihH$ZhlE-e%?xkBH77_%t1;rpzvoAViaQLW8`DzVB}#|VwPc)Vdi2KU{qn|
zVl0xMe3yB$U=cIOE*200wu^mo7mHkd5g!8sgC=K@Fi2bkM2Lb2F%TgEA|yeC6o`-p
z5#k_17EZ`9FfiQWDo)KwO)hauOion?#XZPK1{NV^J|>WJ1Q=x)ixeQXaDbGv=9T6a
wRNi9BD~RHmtja2=ev7RnGdDG}<Q7v&W|2I|Y>@IIge_o}+~inRIVKJT0M!bMSpWb4

diff --git a/__pycache__/loadFer2013ds.cpython-38.pyc b/__pycache__/loadFer2013ds.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..08fa3949ccfa1ea775d20f33b432ec244d74ec5e
GIT binary patch
literal 1291
zcmWIL<>g{vU|@LY(UDNh%E0g##6iZ)3=9ko3=9m#RtyXbDGVu$ISf${nlXwA%x8{b
z2GcCLtWj)?3@J=0%q<L2>?!OitSM|Qj8PmZjKK_=94|qp`Xw`hC@5xQU|?WjU|?_t
znP0`gz>v;R%UHrt!_drF%ap=c%bdbg%TmK!!<5ZX6jZ}d!z{^A%UZ*l!ko=qWLCpc
z!)ne@%aSKl!?=L4gsFz5nQ<W_$bOKX6c%%aTINC#RJju71uQj;3mIz}OPCk1)-a~9
zE@Y}@tO2=?&94aLv|9|n7!_WESei^P4Hy_0idY#K7&O^#G3TV_-C|2DC`ipqxy78B
zS8|IfuizGIVo_0I<t_H2)Z&c9f>cemTWrN8MVWc&w-_C7vE`)ZrDv4fV$8h7oR?H|
zi#a*5XeC3D5Ca3lFMIvmyb}Fn{m|mnqGJ7$%-sAE{j|j7%)}hs)QW<l)Z*gI{5;*1
z)RNTXlFa-({ha*76t~nOBLhR@lw!St%3EB;B}F0mjv(tfK*1*m3R6ZFMh-?6MiwRx
zMh+$pMlL2kMixdcMm{DkMjoaj5e5c^m-;;n3@DMn$H2hA35$d?ph#w{WlUkJWlCYL
zWlmuMMPdqj3P%cO3RgONElWB>Eo(YMEn5jg4RZ>23Qr1eFLQrcEqe)L7E=v-3SSC;
zFH<c?33ClcGbl0`YdLE;To_^lYPo7SQv}QzYB_5-YZ$UwigFkk3YXPz#W2@$*Yecz
z*03&MDPgVQfka{r+X9vvZcucVu%!sraL#5(5vpOB&5$BImlb4gVMsWGDMKJb5JLn*
z3Ht($8rFr3wX9$k+d{@#wi?zNwlt<-22Bx2v=)gnFfddJq$HLk>Zd`1Q7^f;>=vtI
zUV2gGEq0g8;`Gwul3Ofpsfk6mSUnO83My|g2PdZ7Vh=7YD9S8Oy~XaAT3S++m~)G<
zsEW0;Bu&>slkpZuYF=`FN@iaAFGd|r##@{zsX3XsnI)-3MWA$D#paotn4YRo#i~$T
zTBJ}43TB1mjKre!!z)uYId8G#7o_IhVg@;cttd4yCAH`lOI~V4NfA3J;en#-7HdIK
zW?o4VNM{i!qiXWq;>k^{@JlKJ8(n;hG5MB29*hS~A-6cvGILV%5_3~;air$vgA!2j
zEyjpjjFGokKr*+ul8ejYAvPBAGcYjRVlK)rzr_Vr>z7n?iyM-R!Is?OEU7F=1v9~k
z7eWYw^zkF5OP9owL`6_iG=U~X7Dg6EHby1}F#ONL3`&>+Oni(SOrT`S0ZOV&pd`x0
zq{PI<$i-M>49aSZ;JDJ{EK&klz?xT@TTlu1|1IX^GNW69xrrqOIr$|ynMryDl_1tF
vHYm+pkW&Iq<Y0p!t^{Q^2n+094x8Nkl+v73J0_4rK>-SK1_vV#GlwJq;@m`$

literal 0
HcmV?d00001

diff --git a/__pycache__/loadRavdessDs.cpython-38.pyc b/__pycache__/loadRavdessDs.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e58c4aa7cbfe2ee2e1fbb4fa4c064806e15caa57
GIT binary patch
literal 2241
zcmWIL<>g{vU|`^j??_1HXJB{?;vi#Y1_lNP1_p*=4+aK?6owSW9EK<e%^1a)!jQt0
z!xRGHGe@zcFr+Z&u;#Kwu`x2Fu%xiIFhsGZa-^^|Go~@7u%~deFh+5vFa|Sda=rxF
z;it)Xi&0CH@fLSxUO{O|d~rr%L25D!ND7L17#JAX7#J9wK`!89WMC*^NMWpDY-VC)
zNMWjFDPdf|RKu{4v6i)lbpi82h7{&(rXrmZh7^`&Mn;Ad)><}*c+s~S)&(pJ8Df}f
z*=sp!*i+cDnTo#Eu%$4WGt{!xu+^}LFr+YLvlg8J=_z5YVN79{WN2or<t$;V;gn>k
z<tkyX;R5qHvN&rvQkYXXdYNjuOSo#dn;BD>vYCpG)G*a>n={lh)i5vMuHjtB$Ov*P
zX9-sgQ!`@<R|!`Qb2DQz3nN1wM+rj;H^_Y@j0<>Dcos6I@H#M*@TTxJGiEatJt_QF
z!nlAhg};VlArm7*p+VudLW3IC1^f#c7#T_!7YKmp8m1cNG^St%O##1BK?Vi}pVZ`%
z(xOy_vdomjEAtgnQeQGMFfh1PDY+IE<rgXB7o_H8=B0zAQ}Y#yONvqxa~1N76w)$t
zQj0(;Lte5oFfeE`M)8yvWtOD6=H{1V=I514GB7Z><mcrbUYV+pn3Z2za(HE`j)G@y
zVtQ(^RTaBWVp3{Ou~n%Z0|SF=QBi7XkwQU!X^}!sqCyH(mz9EJX>w_vLVl@2K~8CL
zW@>3su|ip9F~|gkl*21aQj<##uPlBEvgzf21_p*(jD@#Y-HH-(Q#A!|u@)3%=9S!H
zPA)UL#S@m9lA7<FSOD_qEso4$|AN%K)RbE+MX8A?x0pfdI8q_*E{@`dvO@CxN^_G^
zi*7Mx7Tn^=1UWpQC_g#1xcC-xPHNsQw#0&h)V!2i?B$7>CElr(x0v&bQf{$j<`!q<
zm)~M9O3g`4EKa>8n37stQj}lmn3EHpnU|7ZUaZM;ivtvTd5O8Hw>Zl{PV)mZ(-M<Y
zi+wVSOKx%HfP)9bj^a!M`6wu{B=r_^a$><P#_U_nMX4pXSi$1AxS-DUODc+Dhq9wM
z@>0v)K&Gu^C{ku%VEAS2Y!wq)oLW>IQ<9mRUlNm+n4FoIqnlb$P?TC+oSC1eo019&
z&&>S1n4J8?l%T}2l+@y4mtwtw%3C6-6(vQ9$t5m{C5diD`MKcG<N)O=Cs0;n;bLTA
z<YMGu<X~oD<Y3}q<N%W_tY8*1A2W#NVq{}v`p?A3#K88SjfsO<jFE|jkA;PqgPDW5
zNEH-$%$a#<$)E%UG82UP85kHiVfi<Vk%1wFv6eA~DTNtSI;60H%7hfo6s~m6TBdY{
zTIO_yT9y)q1&lR}3mIJ)Vvp3amN0?xO$|dfOOXR3L*b_q<`nK4))bx;hE^s?hS>}$
zymOhsc?qnmhE*I?8nBfx)vz@)*0QJY)pFFZxiG|5)Piy?zd1uKM+tL^Kn>??h7`dX
zj@b+;LN)BO8RoKq%&1`m=}!?7XQ<&62jzK|1*|E;3mH>H92iR2QbfV|^B5yTp-v5F
zikKurig-3t(VY_J6bX>-!ZJ`9QNvloT*K1L62nx>T+32eR;UB<K`nO)bBbgQ_iTm~
zDX5D%KxKyuL##|KM-6igha^KSPYGuYPYG8u6NqoaFoCg9sDyg~PYv@z##&x5i)A5W
zEnf+94R?w(s0^&(t>FWg12TS95-Eu#iTXv5NDoOZF3|@k%VPaojPbYF{ZdOyiV}08
zSez4ca&NJEBo-7@-eL|;Ou5DCn3rBud5gs@HL>UxyGv$qdTDXVE%xBjf}+gg)KW19
z1_rK>AV<#-S6^4Z5Cuo)5ZBNk1;0ZZUjF<4|NkvUE^rA_1S++w_(F;jGfPr)Q}ap`
zQc@MFICP+8&M!v&B2YPA1S)5WK<TcC6_j8YjjB|jrHz6*$Z4tSItqFDc?w|56;ksQ
zlJj#5N>Z!%QA{pYDAi+NV8Bw6C=`PVpj1#H11buOLC%0?gi=s0f|X3g3dtFXMd^oE
zrh=3dlx7wyK$Yof^4?;~FTTZ|lUZDnl37$F%)r1<1ga#9xIrvY5CIAmNRnqQO3X`7
zjpEJAFUobyO9@IX&a6r;0@a$gnDPp4u_hK3C01(k-{Q$ltnf=Jf)t)nTxt0^DXB%E
z;-g3aWS|U)kY!+Ch~i94F3B$fl_pVK;1~oki=(*nl8XG(z}dFANFJnG8bnBeL(wma
z4N|DyVvM-O7zr*fAcP7?o*%h5NGwTo2j%+!Q2u0MVdP+BVt~T`EKDqnEa2SE!X&}S
z$H>9N!o<PI!6?Njz%0hd!o<VG#RSU#VD<lbSU4C3z-5IHBNr1F6BlEV4X7I7Dl!H+
z4HR0pSW8PXbBc?EKmwqe>K0paeqLH;Iyi)&A()nvpI8Fc3UM<io**o+>p5(4^HWN5
VQtdzmOEIWL0CExt^DuMR0|4auV&eb+

literal 0
HcmV?d00001

diff --git a/__pycache__/utils.cpython-38.pyc b/__pycache__/utils.cpython-38.pyc
index 7cc32c3a9dd6cc08a3f8747f0e523c76e61937a7..eb8bbc6317920b51516da144ef65c317a32d9963 100644
GIT binary patch
delta 670
zcmeC>Xy@V!<>lpKU|?Xlq|%WP!8(yohS6uDwt_Q53R4bSE_)O^BSQ*v3QG$^6h{hc
z3R??96lV&13P%e=6juso3Req56n6?^FoP!dOOPRcnvAzNQgic5GV}9_C+2Ndief5B
zyTx8ySelwzm8!{fi#0PhF+DYk4NMuWWGLd9?9AvS&BnmMP%Hp4fsv1qkCBIwkCBB@
zh>?%6h;Q-&#$06{1_p*AFu}{fz;KJLpeQvZvq%u6iwUHQsYqzD0h26a)nq@WV&)WP
ziOC0;logmkx>!L33rH7xT26jqiLp^SNC(4%c!$aVnPde)Ru{2@<XLi46Z47$ChIaA
z)r0K<6JQH@^YV*w9rIFxQj0UIQpG^_f?dnZ#U#e0!pOxa#K^_S!&D^Cz`&5q2sR%=
zfLsSM+!>@rhJk@0g)xN*6riBMOkq!FuVt)ZT)<Gnun-i89DcW09rMzQD(i2tyJQxp
zmll`YVsT4NEV{+&kyubrd5bwXG36F}aA`qNW^w8*cE8lplA^>MO~zZSnRzLx6`D-9
z*rB0c1hN?tWC(BYLnTA<{YrC_Qj1hUt^$P(1EUaQktPEJgC@@{*1Xc(`hv<LkOzxI
z85kI%1alKh3UcyGax#<j3MxUYC^iUP!~v3LOU}<r%S?w@$p;E=ArJuy46va{&Oo?@
h!zMRBr8Fni4isF)AYB}c9E?1SJWL$i9L%g7`~WTflHmXV

delta 418
zcmZqY>gC`I<>lpKU|?X_myn(?pJgJS45P(FZ3Shf6y_F&D7F-q6xJ4oDE1V#6!sQ|
zD2^1yU<OT&mmm#(6Z<wR-C`<9yTx8ySelwzm8!{fi#0PhF+KGb8<;X$$xy^QIhoN(
znw5cpp;!Q<i;<6!kCBIwkCBB@h>?%6h=1|{#$07y1_p*AFu}*bz;KJLpeQvZvk0W2
zm<gndsYrNo0Fx|Z+2lN?;>k~$lw?4{MJym~%pe`?X*v0cCB{ZC|1&T!Ojc)>6$DvX
z#0HXM$xThnD-xU>%4}2*wgXInE#l3~FUobyO9@IX&a6rmg}aiOi%E=0g^`O<h>?qt
zgQ-Z7fq_Aj^A>AfX>LJfks!!gagfo1xrrqOIr$|ynMryDl_1tFHV9qB4pPJqB7{K%
p$b=$ZeGn6DJ(vKyfx{*@KczG$)s7KlJt!_X7&#bum^ipOSOHwNSa1LU

diff --git a/buildEmotionModel.ipynb b/buildEmotionModel.ipynb
new file mode 100644
index 0000000..c5b961d
--- /dev/null
+++ b/buildEmotionModel.ipynb
@@ -0,0 +1,212 @@
+{
+ "metadata": {
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.5-final"
+  },
+  "orig_nbformat": 2,
+  "kernelspec": {
+   "name": "python385jvsc74a57bd031f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6",
+   "display_name": "Python 3.8.5 64-bit"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2,
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "Model used: firstModel\n",
+      "Image 1 sur 100 chargée\n",
+      "Image 2 sur 100 chargée\n",
+      "Image 3 sur 100 chargée\n",
+      "Image 4 sur 100 chargée\n",
+      "Image 5 sur 100 chargée\n",
+      "Image 6 sur 100 chargée\n",
+      "Image 7 sur 100 chargée\n",
+      "Image 8 sur 100 chargée\n",
+      "Image 9 sur 100 chargée\n",
+      "Image 10 sur 100 chargée\n",
+      "Image 11 sur 100 chargée\n",
+      "Image 12 sur 100 chargée\n",
+      "Image 13 sur 100 chargée\n",
+      "Image 14 sur 100 chargée\n",
+      "Image 15 sur 100 chargée\n",
+      "Image 16 sur 100 chargée\n",
+      "Image 17 sur 100 chargée\n",
+      "Image 18 sur 100 chargée\n",
+      "Image 19 sur 100 chargée\n",
+      "Image 20 sur 100 chargée\n",
+      "Image 21 sur 100 chargée\n",
+      "Image 22 sur 100 chargée\n",
+      "Image 23 sur 100 chargée\n",
+      "Image 24 sur 100 chargée\n",
+      "Image 25 sur 100 chargée\n",
+      "Image 26 sur 100 chargée\n",
+      "Image 27 sur 100 chargée\n",
+      "Image 28 sur 100 chargée\n",
+      "Image 29 sur 100 chargée\n",
+      "Image 30 sur 100 chargée\n",
+      "Image 31 sur 100 chargée\n",
+      "Image 32 sur 100 chargée\n",
+      "Image 33 sur 100 chargée\n",
+      "Image 34 sur 100 chargée\n",
+      "Image 35 sur 100 chargée\n",
+      "Image 36 sur 100 chargée\n",
+      "Image 37 sur 100 chargée\n",
+      "Image 38 sur 100 chargée\n",
+      "Image 39 sur 100 chargée\n",
+      "Image 40 sur 100 chargée\n",
+      "Image 41 sur 100 chargée\n",
+      "Image 42 sur 100 chargée\n",
+      "Image 43 sur 100 chargée\n",
+      "Image 44 sur 100 chargée\n",
+      "Image 45 sur 100 chargée\n",
+      "Image 46 sur 100 chargée\n",
+      "Image 47 sur 100 chargée\n",
+      "Image 48 sur 100 chargée\n",
+      "Image 49 sur 100 chargée\n",
+      "Image 50 sur 100 chargée\n",
+      "Image 51 sur 100 chargée\n",
+      "Image 52 sur 100 chargée\n",
+      "Image 53 sur 100 chargée\n",
+      "Image 54 sur 100 chargée\n",
+      "Image 55 sur 100 chargée\n",
+      "Image 56 sur 100 chargée\n",
+      "Image 57 sur 100 chargée\n",
+      "Image 58 sur 100 chargée\n",
+      "Image 59 sur 100 chargée\n",
+      "Image 60 sur 100 chargée\n",
+      "Image 61 sur 100 chargée\n",
+      "Image 62 sur 100 chargée\n",
+      "Image 63 sur 100 chargée\n",
+      "Image 64 sur 100 chargée\n",
+      "Image 65 sur 100 chargée\n",
+      "Image 66 sur 100 chargée\n",
+      "Image 67 sur 100 chargée\n",
+      "Image 68 sur 100 chargée\n",
+      "Image 69 sur 100 chargée\n",
+      "Image 70 sur 100 chargée\n",
+      "Image 71 sur 100 chargée\n",
+      "Image 72 sur 100 chargée\n",
+      "Image 73 sur 100 chargée\n",
+      "Image 74 sur 100 chargée\n",
+      "Image 75 sur 100 chargée\n",
+      "Image 76 sur 100 chargée\n",
+      "Image 77 sur 100 chargée\n",
+      "Image 78 sur 100 chargée\n",
+      "Image 79 sur 100 chargée\n",
+      "Image 80 sur 100 chargée\n",
+      "Image 81 sur 100 chargée\n",
+      "Image 82 sur 100 chargée\n",
+      "Image 83 sur 100 chargée\n",
+      "Image 84 sur 100 chargée\n",
+      "Image 85 sur 100 chargée\n",
+      "Image 86 sur 100 chargée\n",
+      "Image 87 sur 100 chargée\n",
+      "Image 88 sur 100 chargée\n",
+      "Image 89 sur 100 chargée\n",
+      "Image 90 sur 100 chargée\n",
+      "Image 91 sur 100 chargée\n",
+      "Image 92 sur 100 chargée\n",
+      "Image 93 sur 100 chargée\n",
+      "Image 94 sur 100 chargée\n",
+      "Image 95 sur 100 chargée\n",
+      "Image 96 sur 100 chargée\n",
+      "Image 97 sur 100 chargée\n",
+      "Image 98 sur 100 chargée\n",
+      "Image 99 sur 100 chargée\n",
+      "Image 100 sur 100 chargée\n",
+      "\n",
+      "TRAITEMENT ACTEUR N°14\n",
+      "\n",
+      "Traitement de 01-01-03-02-02-02-01.mp4, video 1/2\n",
+      "Lecture vidéo de 01-01-03-02-02-02-01.mp4\n",
+      "Donnée ajoutée, Images: 1 Labels: 1\n"
+     ]
+    }
+   ],
+   "source": [
+    "#@title Imports\n",
+    "#%load_ext autoreload  #Need to uncomment for import, dont understand\n",
+    "\n",
+    "import tensorflow as tf\n",
+    "from tensorflow import keras\n",
+    "from tensorflow.keras import datasets, layers, models, losses\n",
+    "import tensorflow_datasets as tfds\n",
+    "#from google.colab import files\n",
+    "\n",
+    "from matplotlib import image\n",
+    "import os\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "import matplotlib\n",
+    "import random as rd\n",
+    "import cv2\n",
+    "import csv\n",
+    "\n",
+    "from loadFer2013ds import *\n",
+    "from loadRavdessDs import *\n",
+    "from utils import *\n",
+    "\n",
+    "X, Y = loadFer2013Data(100)\n",
+    "W, Z = loadRavdessData(100)"
+   ]
+  },
+  {
+   "source": [],
+   "cell_type": "code",
+   "metadata": {},
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#@title Hyperparamètres\n",
+    "classes = [\"Angry\", \"Disgust\", \"Fear\", \"Happy\", \"Sad\", \"Suprise\", \"Neutral\"]\n",
+    "Na = len(classes)\n",
+    "maxNbrImagesForEachClasses = float('inf')\n",
+    "h = 48\n",
+    "l = 48\n",
+    "p = 1\n",
+    "input_shape = (h, l, p)\n",
+    "\n",
+    "epochs = 5\n",
+    "batch_size = 128\n",
+    "validation_size = 0.1"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ]
+}
\ No newline at end of file
diff --git a/config.py b/config.py
index be19af0..7fd6a4d 100644
--- a/config.py
+++ b/config.py
@@ -1,8 +1,8 @@
-#Name of model used
+# Name of model used
 modelName = 'firstModel'
 
-#Emotions provided by the dataset
+# Emotions provided by the dataset
 emotions = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Suprise", "Neutral"]
 
-#Shape of input of the model
-input_shape = (48,48,1)
\ No newline at end of file
+# Shape of input of the model
+input_shape = (48, 48, 1)
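+# (height, width, channels): 48x48 single-channel grayscale images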
diff --git a/imageProcess.py b/imageProcess.py
index a773b4d..cba699e 100644
--- a/imageProcess.py
+++ b/imageProcess.py
@@ -4,9 +4,11 @@ import numpy as np
 import faceAnalysis as fa
 import timeit as ti
 
-def imageProcess(image):
+def imageProcess(image, writeEmotion=True):
     #Objectives : detect faces, identify emotion associated on it, modify the image by framing faces and writing their emotions associated
     
+    facesList = []
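+    # Every face crop detected in the image is collected here and returned,
+    # so callers (e.g. the dataset loaders) can reuse the crops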
+
     #Import faces and eyes detectors from cv2
     face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_frontalface_default.xml')
     eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_eye.xml')
@@ -25,16 +27,19 @@ def imageProcess(image):
         #Select face image
         face_gray = gray[y:y+h, x:x+w]
         face_color = image[y:y+h, x:x+w]
-        
+        facesList.append(face_color)
+
         #Detect eyes on the face, create green rectangle
         eyes = eye_cascade.detectMultiScale(face_gray)
         for (ex,ey,ew,eh) in eyes:
             cv2.rectangle(face_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),1)
 
         #Write emotion on the image
-        emotion = fa.detectEmotion(face_color)
-        cv2.putText(image, emotion, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
+        if writeEmotion:
+            emotion = fa.detectEmotion(face_color)
+            cv2.putText(image, emotion, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
 
+    return facesList
 
 def selectFace(image):
     #Return a face identified on an colored image
@@ -45,7 +50,7 @@ def selectFace(image):
     #Face detection is made on gray images
     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
 
-    faces = face_cascade.detectMultiScale(gray, 1.3, 5) #This return a list of tuple locating faces on image
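+    # A scaleFactor of 1.03 steps through the image pyramid much more finely
+    # than 1.3: slower, but it catches face sizes that fall between two coarse
+    # scales (minNeighbors=5 still filters out spurious detections)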
+    faces = face_cascade.detectMultiScale(gray, 1.03, 5)  # Returns a list of (x, y, w, h) tuples locating faces on the image
     
     #The face returned is the first face detected on the image (if exists)
     if faces != []:
diff --git a/loadFer2013ds.py b/loadFer2013ds.py
index 1818992..b10e6b8 100644
--- a/loadFer2013ds.py
+++ b/loadFer2013ds.py
@@ -1,62 +1,63 @@
-#This file load the dataset fer2013 as arrays. 
+# This file loads the fer2013 dataset as arrays.
 import csv
 import numpy as np
 import cv2
 import matplotlib.pyplot as plt
 
 
-nbrImages = 35887
-maxNbrImages = nbrImages
-emotions = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Suprise", "Neutral"]
+def strToArray(string):  # Fer2013 stores each image as a string of space-separated pixels, so it must be converted
+    A = []
+    length = len(string)
+    i = 0
+    nbr = ""
 
-def strToArray(string):  #Fer2013 provides images as string so it needs to be transformed
-	A = []
-	lenght = len(string)
-	i=0
-	nbr = ""
+    while i < lenght:
+        car = string[i]
 
-	while i<lenght:
-		car = string[i]
+        if car != " ":
+            nbr += car
+        else:
+            A.append(int(nbr))
+            nbr = ""
+        i += 1
+    A.append(int(nbr))
 
-		if car != " ":
-			nbr += car
-		else:
-			A.append(int(nbr))
-			nbr = ""
-		i+=1
-	A.append(int(nbr))
-	
-	A = np.array(A)
-	A = np.reshape(A, (48, 48))
+    A = np.array(A)
+    A = np.reshape(A, (48, 48))
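+    # (equivalent one-liner for these space-separated strings:
+    #  np.array(string.split(), dtype=int).reshape(48, 48))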
 
-	return A
+    return A
 
 
+# LOAD DATA AS ARRAY
 
-#LOAD DATA AS ARRAY
-X = []
-Y = []
+def loadFer2013Data(maxNbrImages=35887):
+    nbrImagesFer2013 = 35887
+    maxNbrImages = min(maxNbrImages, nbrImagesFer2013)
+    filename = "data/fer2013.csv"
+    emotions = ["Angry", "Disgust", "Fear",
+                "Happy", "Sad", "Suprise", "Neutral"]
 
-filename = "data/fer2013.csv"
+    X = []
+    Y = []
 
-with open(filename,'r',encoding='utf-8') as file:
-	
-	csv_reader = csv.reader(file, delimiter=",")
-	next(csv_reader)  								#Passe la ligne de titre
-	
-	i=0
-	for row in csv_reader:
+    with open(filename, 'r', encoding='utf-8') as file:
 
-		i+=1
-		if i>maxNbrImages: break
-		
-		emotionNbr, stringImage, typeImage = row
-		traitement(emotionNbr, stringImage, typeImage)
+        csv_reader = csv.reader(file, delimiter=",")
+        next(csv_reader)  # Skip the header row
 
-		X.append(strToArray(stringImage))
-		Y.append(emotionNbr)
+        i = 0
+        for row in csv_reader:
+            i += 1
+            if i > maxNbrImages:
+                break
 
-		print(f"Image {i} sur {nbrImages} chargée", end='\r')
+            emotionNbr, stringImage, typeImage = row
 
-X = np.array(X)
-Y = np.array(Y)
\ No newline at end of file
+            X.append(strToArray(stringImage))
+            Y.append(emotionNbr)
+
+            print(f"Image {i} sur {maxNbrImages} chargée")
+
+    X = np.array(X)
+    Y = np.array(Y)
+    return X, Y
diff --git a/loadRavdessDs.py b/loadRavdessDs.py
new file mode 100644
index 0000000..cb1db15
--- /dev/null
+++ b/loadRavdessDs.py
@@ -0,0 +1,120 @@
+import os
+import cv2
+import numpy as np
+from utils import *
+import imageProcess as ip
+from config import input_shape
+
+# Ravdess emotion codes (index = code from the filename); the order differs
+# from the fer2013 reference list, hence the remapping via emotionToNumber.
+emotions = ["_", "Neutral", "Calm", "Happy",
+            "Sad", "Angry", "Fear", "Disgust", "Suprise"]
+
+
+def extractDataFromVideo(filename, videoName, facesList, labelsList):
+    # Extract every face from the given video and append it to facesList, along with the corresponding label in labelsList.
+
+    # Start capture of a video
+    print("Lecture vidéo de", videoName)
+    frameRate = 1
+    cap = cv2.VideoCapture(filename)
+    if not cap.isOpened():
+        print("Error opening video stream or file")
+
+    # Read until video is completed
+    k = 0
+    while cap.isOpened():
+        # Capture frame-by-frame
+        ret, frame = cap.read()
+        if ret:
+            k += 1
+
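+            # e.g. frameRate = 0.2 would keep one frame out of five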
+            if k*frameRate >= 1:  # Read a frame each N frames where N=1/frameRate
+                k = 0
+
+                # Load image and labels
+
+                # Ravdess emotions list is not in the same order as fer2013 (reference)
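+                # RAVDESS filenames hold seven two-digit fields
+                # (e.g. 01-01-03-02-02-02-01.mp4); the 3rd field is the
+                # emotion code 01-08, so videoName[7] is its second digit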
+                emotionNbr = int(videoName[7])
+                emotion = emotions[emotionNbr]
+                emotionNbr = emotionToNumber(emotion)
+
+                # Detect faces on the image
+                newFaces = ip.imageProcess(frame, writeEmotion=False)
+
+                # These are single-person videos, so anything other than
+                # exactly one detected face means the detection failed.
+                if len(newFaces) == 1:
+                    facesList += newFaces
+                    labelsList.append(emotionNbr)
+                    print("Donnée ajoutée, Images:", len(
+                        facesList), "Labels:", len(labelsList))
+                else:
+                    print("Erreur pour la donnée : Aucun ou plusieurs visages détectés")
+
+            # Press Q on the keyboard to exit
+            if cv2.waitKey(25) & 0xFF == ord('q'):
+                break
+
+            # Display the resulting frame
+            cv2.imshow('Frame', frame)
+
+        # Break the loop
+        else:
+            break
+
+    # When everything done, release the video capture object
+    cap.release()
+
+    # Closes all the frames
+    cv2.destroyAllWindows()
+
+    # Return the face and label lists extended with the new data
+    return facesList, labelsList
+
+
+# LOAD DATA
+
+def loadRavdessData(maxNbrImages=float('inf')):
+
+    foldername = "data/ravdessTest/videos/"
+    emotions = ["_", "Neutral", "Calm", "Happy",
+                "Sad", "Angry", "Fear", "Disgust", "Suprise"]
+    facesList = []
+    labelsList = []
+
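+    # Assumed layout (RAVDESS convention): data/ravdessTest/videos/Actor_XX/<video>.mp4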
+    # For each actor...
+    for actorName in os.listdir(foldername):
+
+        # Stop once the desired maximum number of images is exceeded
+        if len(facesList) > maxNbrImages:
+            break
+
+        print(f"\nTRAITEMENT ACTEUR N°{actorName[-2:]}\n")
+        videoNames = os.listdir(foldername+actorName)
+        nbrOfVideos = len(videoNames)
+
+        k = 0
+        # For each video...
+        for videoName in videoNames:
+            k += 1
+            print(f"Traitement de {videoName}, video {k}/{nbrOfVideos}")
+            filename = foldername+actorName+'/'+videoName
+
+            # Press Q on keyboard to exit ONE video
+            if cv2.waitKey(25) & 0xFF == ord('q'):
+                break
+
+            if videoName[7] == '2':
+                # Skip the 'Calm' emotion, which has no fer2013 counterpart
+                print("Emotion 'Calme', non prise en compte")
+            else:
+                facesList, labelsList = extractDataFromVideo(
+                    filename, videoName, facesList, labelsList)
+
+    # Turn the list of N*M*3 color face images into an array of 48*48*1 gray images
+    N = len(facesList)
+    print(f"Traitement des {N} visages détectés sur les vidéos de Ravdess")
+
+    for k in range(N):
+        visage = facesList[k]
+        facesList[k] = normAndResize(visage, input_shape)
+    X = np.array(facesList)
+
+    Y = np.array(labelsList)
+
+    print(N, " données chargées depuis Ravdess.")
+    return X, Y
diff --git a/test.py b/test.py
index 1f8a166..d376c9b 100644
--- a/test.py
+++ b/test.py
@@ -1 +1,23 @@
-#Use this file for test
\ No newline at end of file
+# Use this file for tests
+
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import datasets, layers, models, losses
+import tensorflow_datasets as tfds
+#from google.colab import files
+
+from matplotlib import image
+import os
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib
+import random as rd
+import cv2
+import csv
+
+from loadFer2013ds import *
+from loadRavdessDs import *
+from utils import *
+
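+# Smoke test: load the first 100 fer2013 images (of 35887) and ~100 Ravdess faces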
+X, Y = loadFer2013Data(100)
+W, Z = loadRavdessData(100)
\ No newline at end of file
diff --git a/utils.py b/utils.py
index d15c294..0bb6258 100644
--- a/utils.py
+++ b/utils.py
@@ -1,6 +1,7 @@
 import numpy as np
 import cv2
 import matplotlib.pyplot as plt
+from config import emotions
 
 def afficher(image):
     if len(image.shape) == 3:
@@ -29,7 +30,10 @@ def normAndResize(image, input_shape):
         image = image.mean(2)
     image = np.reshape(image, (h, l, p))  # restore third dimension
     image = image.astype("float32")
-    image = image/255  # normalisation
+    image = (image/127.5) - 1  # normalise
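+    # maps pixel values from [0, 255] to [-1, 1] (0 -> -1.0, 255 -> 1.0)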
 
     return image
 
+def emotionToNumber(emotion):
+    # emotions is the fer2013 ordering imported from config above
+    return emotions.index(emotion)
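+# e.g. emotionToNumber("Happy") -> 3, its index in the fer2013 ordering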
-- 
GitLab