Skip to content
Snippets Groups Projects
Commit 088d6bac authored by Nicolas Fley's avatar Nicolas Fley
Browse files

prod_mat_vec done

parent c87bfa3c
No related branches found
No related tags found
No related merge requests found
File added
#include <stdio.h> #include <stdio.h>
#include <stdlib.h>
#include <mpi.h> #include <mpi.h>
#include <time.h>
/* /*
Commands : Commands :
...@@ -8,8 +10,9 @@ C:\MPICH2\bin\mpiexec.exe -localonly 10 ...@@ -8,8 +10,9 @@ C:\MPICH2\bin\mpiexec.exe -localonly 10
*/ */
/* /*
* The reference book says we should use all_gather and all_gatherv to make all the processor * The reference book says we should use all_gather and all_gatherv to make all the processors
* access the x vector. BUT, x isn't going to be modified in the process. Moreover * access the x vector.
 * BUT, what function should we use to get the entire vector? Moreover
* the website http://mpitutorial.com say that MPI_Allgather should be used "only" * the website http://mpitutorial.com say that MPI_Allgather should be used "only"
* if all the processes contains a part of the data and EACH ONE want the entire data * if all the processes contains a part of the data and EACH ONE want the entire data
* dispatched through the processes. * dispatched through the processes.
...@@ -21,23 +24,114 @@ C:\MPICH2\bin\mpiexec.exe -localonly 10 ...@@ -21,23 +24,114 @@ C:\MPICH2\bin\mpiexec.exe -localonly 10
* Moreover, it is said that we should use allgatherv to get the final matrix but, if we consider * Moreover, it is said that we should use allgatherv to get the final matrix but, if we consider
* this stackoverflow answer : * this stackoverflow answer :
* http://stackoverflow.com/questions/15049190/difference-between-mpi-allgather-and-mpi-alltoall-functions#answer-34113431 * http://stackoverflow.com/questions/15049190/difference-between-mpi-allgather-and-mpi-alltoall-functions#answer-34113431
* We see that using all gatherv will, more than duplicate the data through each processor. * We see that using all gatherv will duplicate the data through each processor.
* Using gather seems more appropriated. * Using gather seems more appropriated.
*/ */
/* Print a height x width matrix to stdout, one row per line,
 * each cell formatted as "%5.1f, ". */
void printMatrix(int width, int height, float **matrix){
    for (int row = 0; row < height; row++) {
        for (int col = 0; col < width; col++) {
            printf("%5.1f, ", matrix[row][col]);
        }
        printf("\n");
    }
}
/* Print a vector of `height` floats to stdout, one "%5.2f" entry per line. */
void printVector(int height, float *vector){
    for (int idx = 0; idx < height; idx++) {
        printf("%5.2f\n", vector[idx]);
    }
}
/* Fill `matrix` (height x width) with random values in [0, 20) and `vector`
 * (length `height`) with random values in [0, 10).  The seed is offset by
 * `rank` so each MPI process generates different data. */
void initProb(int width, int height, float **matrix, float *vector, int rank){
    srand(time(NULL)+rank);
    for (int r = 0; r < height; r++) {
        float *row = matrix[r];
        for (int c = 0; c < width; c++) {
            row[c] = (float)(rand() % 20);
        }
        vector[r] = (float)(rand() % 10);
    }
}
/* Dense matrix-vector product: vector_res = matrix * vector_tot.
 * `matrix` is height x width, `vector_tot` has `width` entries and
 * `vector_res` receives `height` entries. */
void prod_mat_vec(int width, int height, float * vector_res, float ** matrix, float * vector_tot){
    for (int i = 0; i < height; i++) {
        float *row = matrix[i];
        float acc = 0.0f;
        for (int j = 0; j < width; j++) {
            acc += row[j] * vector_tot[j];
        }
        vector_res[i] = acc;
    }
}
/*
 * Distributed dense matrix-vector product.
 *
 * Each rank owns a contiguous block of rows of an (size_prob x size_prob)
 * matrix plus the matching slice of the input vector.  The full input
 * vector is assembled on every rank with MPI_Allgatherv, each rank computes
 * its slice of the product, and the result slices are gathered back.
 *
 * Fixes over the previous version:
 *  - row distribution now always sums to size_prob (the old
 *    "(size/size_prob)+1 for all but the last rank" scheme over-counted,
 *    e.g. size_prob=8 on 5 ranks produced 11 rows);
 *  - matrix row-pointer array is allocated with the ROW count and the
 *    correct pointer type (was `(int **)malloc(w_prob * sizeof(float*))`);
 *  - MPI_Allgather was called with sendcount != recvcount, which is invalid
 *    for uneven splits; MPI_Allgatherv with per-rank counts/displacements
 *    is used instead;
 *  - all heap buffers are freed before MPI_Finalize.
 */
int main (int argc, char *argv[]) {
    int rank, size;

    MPI_Init (&argc, &argv);                 /* starts MPI */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);    /* get current process id */
    MPI_Comm_size(MPI_COMM_WORLD, &size);    /* get number of processes */

    printf("proc %d :\n", rank);

    const int size_prob = 8;                 /* global matrix dimension */
    const int w_prob = size_prob;            /* every rank holds full-width rows */

    /* Block row distribution: the first (size_prob % size) ranks get one
     * extra row, so the per-rank counts always sum to size_prob. */
    const int base = size_prob / size;
    const int rem  = size_prob % size;
    const int h_prob = base + (rank < rem ? 1 : 0);
    printf("height %d : %d\n", rank, h_prob);

    /* Per-rank counts and displacements for MPI_Allgatherv. */
    int *counts = malloc((size_t)size * sizeof *counts);
    int *displs = malloc((size_t)size * sizeof *displs);
    if (!counts || !displs) {
        fprintf(stderr, "allocation failure\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    for (int p = 0, off = 0; p < size; p++) {
        counts[p] = base + (p < rem ? 1 : 0);
        displs[p] = off;
        off += counts[p];
    }

    float *vector         = malloc(sizeof(float) * (size_t)(h_prob ? h_prob : 1));
    float *vector_tot     = malloc(sizeof(float) * (size_t)size_prob);
    float *vector_res     = malloc(sizeof(float) * (size_t)(h_prob ? h_prob : 1));
    float *vector_res_tot = malloc(sizeof(float) * (size_t)size_prob);
    float **matrix        = malloc((size_t)(h_prob ? h_prob : 1) * sizeof *matrix);
    if (!vector || !vector_tot || !vector_res || !vector_res_tot || !matrix) {
        fprintf(stderr, "allocation failure\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    for (int i = 0; i < h_prob; i++) {
        matrix[i] = malloc(sizeof(float) * (size_t)w_prob);
        if (!matrix[i]) {
            fprintf(stderr, "allocation failure\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
    }

    initProb(w_prob, h_prob, matrix, vector, rank);
    printMatrix(w_prob, h_prob, matrix);
    printVector(h_prob, vector);

    /* Assemble the full input vector on every rank. */
    MPI_Allgatherv(vector, h_prob, MPI_FLOAT,
                   vector_tot, counts, displs, MPI_FLOAT, MPI_COMM_WORLD);
    printf("Vector total %d : \n", rank);
    printVector(size_prob, vector_tot);

    /* Local slice of the product. */
    prod_mat_vec(w_prob, h_prob, vector_res, matrix, vector_tot);
    printf("Vector res %d : \n", rank);
    printVector(h_prob, vector_res);

    /* Assemble the full result on every rank. */
    MPI_Allgatherv(vector_res, h_prob, MPI_FLOAT,
                   vector_res_tot, counts, displs, MPI_FLOAT, MPI_COMM_WORLD);

    if (rank == 0) {
        printf("prod_vec_matrice = \n");
        printVector(size_prob, vector_res_tot);
    }

    for (int i = 0; i < h_prob; i++) {
        free(matrix[i]);
    }
    free(matrix);
    free(vector);
    free(vector_tot);
    free(vector_res);
    free(vector_res_tot);
    free(counts);
    free(displs);

    MPI_Finalize();
    return 0;
}
#include <stdio.h>
#include <mpi.h>
/*
Commands :
cd 'B:\Mes Documents\progra\calcInt\'
C:\MPICH2\bin\mpiexec.exe -localonly 10
*/
/*
* The reference book says we should use all_gather and all_gatherv to make all the processor
* access the x vector. BUT, x isn't going to be modified in the process. Moreover
* the website http://mpitutorial.com say that MPI_Allgather should be used "only"
* if all the processes contains a part of the data and EACH ONE want the entire data
* dispatched through the processes.
* This image :http://mpitutorial.com/tutorials/mpi-scatter-gather-and-allgather/allgather.png
* Show this point.
* More over according to the problematic given MPI_Scatter and MPI_Gather seems to be
* more usable in this specific context of a dense matrix vector product.
*
* Moreover, it is said that we should use allgatherv to get the final matrix but, if we consider
* this stackoverflow answer :
* http://stackoverflow.com/questions/15049190/difference-between-mpi-allgather-and-mpi-alltoall-functions#answer-34113431
* We see that using all gatherv will duplicate the data through each processor.
* Using gather seems more appropriated.
*/
int main (int argc, char *argv[]) {
int rank, size;
MPI_Status status;
int test=0;
MPI_Init (&argc, &argv); /* starts MPI */
MPI_Comm_rank(MPI_COMM_WORLD, &rank); /* get current process id */
MPI_Comm_size(MPI_COMM_WORLD, &size); /* get number of processes */
MPI_Send(&test,1,MPI_DOUBLE,0,10,MPI_COMM_WORLD);
MPI_Recv(&test,1,MPI_DOUBLE,0,10,MPI_COMM_WORLD,&status);
MPI_Finalize();
}
File added
# depslib dependency file v1.0
1488577259 source:b:\mes documents\progra\calcint\prod_mat_vec\main.c
<stdio.h>
<stdlib.h>
<mpi.h>
<time.h>
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment