diff --git a/prod_mat_vec/bin/Debug/prod_mat_vec.exe b/prod_mat_vec/bin/Debug/prod_mat_vec.exe
new file mode 100644
index 0000000000000000000000000000000000000000..7d4224c794774bd4bb59b11f23ddb626c5e0b9d2
Binary files /dev/null and b/prod_mat_vec/bin/Debug/prod_mat_vec.exe differ
diff --git a/prod_mat_vec/main.c b/prod_mat_vec/main.c
index e8fd89df701968c88e0d284c69ed3b99ed9e6fd9..d2bf2f18a8edc9618d0d1f2935bdbd831dfe46c1 100644
--- a/prod_mat_vec/main.c
+++ b/prod_mat_vec/main.c
@@ -1,5 +1,7 @@
 #include <stdio.h>
+#include <stdlib.h>
 #include <mpi.h>
+#include <time.h>
 
 /*
 Commands :
@@ -8,8 +10,9 @@ C:\MPICH2\bin\mpiexec.exe -localonly 10
 */
 
 /*
-*   The reference book says we should use all_gather and all_gatherv to make all the processor
-*   access the x vector. BUT, x isn't going to be modified in the process. Moreover
+*   The reference book says we should use all_gather and all_gatherv to make all the processors
+*   access the x vector.
+*   BUT, what function should we use to get the entire x vector on every process? Moreover,
 *   the website http://mpitutorial.com says that MPI_Allgather should be used "only"
 *   if all the processes contain a part of the data and EACH ONE wants the entire data
 *   distributed across the processes.
@@ -21,23 +24,114 @@ C:\MPICH2\bin\mpiexec.exe -localonly 10
 *   Moreover, it is said that we should use allgatherv to get the final vector but if we
 *   consider this stackoverflow answer :
 *   http://stackoverflow.com/questions/15049190/difference-between-mpi-allgather-and-mpi-alltoall-functions#answer-34113431
-*   We see that using all gatherv will, more than duplicate the data through each processor.
+*   we see that using allgatherv will duplicate the data on every processor.
 *   Using MPI_Gather seems more appropriate when only one rank needs the whole
 *   result; a hedged sketch of that alternative follows right after this comment.
 */
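+
+/*
+*   A minimal sketch of the gather-based alternative discussed above, assuming
+*   every rank contributes the same number of elements: a root rank collects
+*   the blocks with MPI_Gather, then redistributes the assembled vector with
+*   MPI_Bcast. The helper name gatherThenBcast is hypothetical and the function
+*   is not called below; it only illustrates the trade-off against allgather.
+*/
+void gatherThenBcast(float *local, int count, float *full, int size, int root){
+    /* every rank sends 'count' elements; only 'root' assembles the result */
+    MPI_Gather(local, count, MPI_FLOAT, full, count, MPI_FLOAT, root, MPI_COMM_WORLD);
+    /* broadcast the assembled vector (count*size elements) to every rank */
+    MPI_Bcast(full, count*size, MPI_FLOAT, root, MPI_COMM_WORLD);
+}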
 
+/* debug helper: prints a height-by-width matrix, one row per line */
+void printMatrix(int width, int height, float **matrix){
+    for(int i=0; i!=height; i++){
+        for(int j=0; j!=width; j++){
+            printf("%5.1f, ",matrix[i][j]);
+        }
+        printf("\n");
+    }
+}
+void printVector(int height, float *vector){
+    for(int i=0; i!=height; i++){
+        printf("%5.2f\n",vector[i]);
+    }
+}
+
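+/* Fills this rank's matrix block and its slice of the x vector with small
+   random values; seeding with time(NULL)+rank keeps ranks from drawing
+   identical sequences. */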
+void initProb(int width, int height, float **matrix, float *vector, int rank){
+    srand(time(NULL)+rank);
+    for(int i=0; i!=height; i++){
+        for(int j=0; j!=width; j++){
+            matrix[i][j]=(float)(rand()%20);
+        }
+        vector[i]=(float)(rand()%10);
+    }
+}
+
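+/* Local product: vector_res = matrix * vector_tot, restricted to the
+   'height' rows owned by this rank. */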
+void prod_mat_vec(int width, int height, float * vector_res, float ** matrix, float * vector_tot){
+    for(int i=0; i!=height; i++){
+        vector_res[i]=0;
+        for(int j=0; j!=width; j++){
+            vector_res[i]+=matrix[i][j]*vector_tot[j];
+        }
+    }
+}
+
 int main (int argc, char *argv[]) {
-    int rank, size;
-    MPI_Status status;
+    int rank, size, size_prob, w_prob, h_prob, h_base;
+    float * vector;
+    float * vector_tot;
+    float * vector_res;
+    float * vector_res_tot;
+    float **matrix=NULL;
 
-    int test=0;
 
     MPI_Init (&argc, &argv); /* starts MPI */
 
     MPI_Comm_rank(MPI_COMM_WORLD, &rank); /* get current process id */
     MPI_Comm_size(MPI_COMM_WORLD, &size); /* get number of processes */
 
-    MPI_Send(&test,1,MPI_DOUBLE,0,10,MPI_COMM_WORLD);
-    MPI_Recv(&test,1,MPI_DOUBLE,0,10,MPI_COMM_WORLD,&status);
+
+    printf("proc %d :\n",rank);
+
+    size_prob=8;
+
+    /* Block-row decomposition: rank i owns counts[i] rows. The first
+       size_prob%size ranks take one extra row, so the counts always sum
+       to size_prob, even when size does not divide it. */
+    h_base=size_prob/size;
+    h_prob=h_base+(rank<size_prob%size ? 1 : 0);
+    w_prob=size_prob;
+
+    int *counts=malloc(sizeof(int)*size);
+    int *displs=malloc(sizeof(int)*size);
+    for(int i=0;i!=size;i++){
+        counts[i]=h_base+(i<size_prob%size ? 1 : 0);
+        displs[i]=(i==0) ? 0 : displs[i-1]+counts[i-1];
+    }
+    printf("height %d : %d\n",rank,h_prob);
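+    /* Worked example: size_prob=8 with size=3 gives counts={3,3,2} and
+       displs={0,3,6}; with size=10, two of the ranks simply own zero rows. */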
+
+    vector=malloc(sizeof(float)*h_prob);
+    vector_tot=malloc(sizeof(float)*size_prob);
+    vector_res=malloc(sizeof(float)*h_prob);
+    vector_res_tot=malloc(sizeof(float)*size_prob);
+
+    matrix=malloc(h_prob*sizeof(float*)); /* one row pointer per local row */
+    for(int i=0;i!=h_prob;i++){
+        matrix[i]=malloc(sizeof(float)*w_prob);
+    }
+    initProb(w_prob,h_prob,matrix,vector,rank);
+
+    printMatrix(w_prob,h_prob,matrix);
+    printVector(h_prob,vector);
+
+    /* block sizes may differ across ranks, so MPI_Allgatherv is required */
+    MPI_Allgatherv(vector,h_prob,MPI_FLOAT,vector_tot,counts,displs,MPI_FLOAT,MPI_COMM_WORLD);
+    printf("Vector total %d : \n",rank);
+    printVector(size_prob,vector_tot);
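+    /* every rank now holds the complete x vector: the "each process wants
+       the entire data" case described in the header comment */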
+
+    prod_mat_vec(w_prob,h_prob,vector_res,matrix,vector_tot);
+
+    printf("Vector res %d : \n",rank);
+    printVector(h_prob,vector_res);
+
+    MPI_Allgatherv(vector_res,h_prob,MPI_FLOAT,vector_res_tot,counts,displs,MPI_FLOAT,MPI_COMM_WORLD);
+
+    if(rank==0){
+        printf("prod_vec_matrice = \n");
+        printVector(size_prob,vector_res_tot);
+    }
+
+    free(vector);     free(vector_tot);
+    free(vector_res); free(vector_res_tot);
+    for(int i=0;i!=h_prob;i++){
+        free(matrix[i]);
+    }
+    free(matrix);
+    free(counts);
+    free(displs);
 
     MPI_Finalize();
 }
diff --git a/prod_mat_vec/obj/Debug/main.o b/prod_mat_vec/obj/Debug/main.o
new file mode 100644
index 0000000000000000000000000000000000000000..4cd9353604a54bfa8653c73455f2280b0073cb72
Binary files /dev/null and b/prod_mat_vec/obj/Debug/main.o differ
diff --git a/prod_mat_vec/prod_mat_vec.depend b/prod_mat_vec/prod_mat_vec.depend
new file mode 100644
index 0000000000000000000000000000000000000000..7b8e971c1c8efe7c88a297db341595e25c41c688
--- /dev/null
+++ b/prod_mat_vec/prod_mat_vec.depend
@@ -0,0 +1,7 @@
+# depslib dependency file v1.0
+1488577259 source:b:\mes documents\progra\calcint\prod_mat_vec\main.c
+	<stdio.h>
+	<stdlib.h>
+	<mpi.h>
+	<time.h>
+