Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
No results found
Show changes
Commits on Source (1)
#include "mpi.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
/*
 * Parallel bucket sort with MPI.
 *
 * Rank 0 (root) generates num_element random values, partitions them into
 * equal-width value ranges ("buckets"), and sends each bucket to one worker
 * rank.  Each worker insertion-sorts its bucket and sends it back; the root
 * concatenates the buckets in rank order, which yields a globally sorted
 * array because bucket i only holds values <= the lower bound of bucket i+1.
 */

/*
 * Map value v to its destination worker rank in [1, nbuckets].
 * Buckets are equal-width slices of [0, max]: worker k owns (k-1)*range..k*range.
 * Values beyond the last boundary (possible because range = max/nbuckets is
 * floored) are clamped into the last bucket instead of being dropped.
 */
static int bucket_for(unsigned int v, unsigned int range, int nbuckets)
{
    int b;
    if (range == 0)
        range = 1;                       /* max < nbuckets: avoid div-by-zero */
    b = (int)((v + range - 1) / range);  /* ceil(v / range) */
    if (b < 1)
        b = 1;                           /* v == 0 belongs to the first bucket */
    if (b > nbuckets)
        b = nbuckets;                    /* clamp overflow into the last bucket */
    return b;
}

int main(int argc, char *argv[])
{
    double starttime, endtime;
    int proceso_id;
    int num_element;
    int i, d, li, c;
    unsigned int j;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int namelen;
    int numprocsused;
    MPI_Status stat;

    /* Initiating parallel part */
    MPI_Init(NULL, NULL);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocsused);
    MPI_Comm_rank(MPI_COMM_WORLD, &proceso_id);
    MPI_Get_processor_name(processor_name, &namelen);

    unsigned int receivedElement;

    if (proceso_id == 0) {
        /* Root process: generate, distribute, gather. */
        if (numprocsused < 2) {
            fprintf(stderr, "need at least 2 processes (1 root + 1 worker)\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        printf("Enter number of elements\n");
        if (scanf("%d", &num_element) != 1 || num_element <= 0) {
            fprintf(stderr, "invalid element count\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        unsigned int *n = malloc((size_t)num_element * sizeof *n);
        int *elementQtyArray = malloc((size_t)numprocsused * sizeof *elementQtyArray);
        if (n == NULL || elementQtyArray == NULL) {
            fprintf(stderr, "out of memory\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        for (i = 0; i < num_element; i++) {
            n[i] = (unsigned int)(rand() % num_element);
        }

        /* starting time calculation of the sort */
        starttime = MPI_Wtime();

        /* Only the maximum is needed to size the bucket ranges. */
        unsigned int max = n[0];
        for (i = 1; i < num_element; i++) {
            if (n[i] > max) {
                max = n[i];
            }
        }

        int nbuckets = numprocsused - 1;            /* rank 0 holds no bucket */
        unsigned int range = max / (unsigned int)nbuckets;

        /* Count how many elements each worker will receive. */
        for (d = 0; d < numprocsused; d++) {
            elementQtyArray[d] = 0;
        }
        for (d = 0; d < num_element; d++) {
            elementQtyArray[bucket_for(n[d], range, nbuckets)]++;
        }

        /* Tell every worker its bucket size (tag 0). */
        for (i = 1; i < numprocsused; i++) {
            MPI_Send(&elementQtyArray[i], 1, MPI_INT, i, 0, MPI_COMM_WORLD);
        }

        /* Send each element to its bucket's worker, exactly once (tag 1). */
        for (d = 0; d < num_element; d++) {
            MPI_Send(&n[d], 1, MPI_UNSIGNED,
                     bucket_for(n[d], range, nbuckets), 1, MPI_COMM_WORLD);
        }

        /*
         * Gather the locally sorted buckets back in rank order (tag 2) and
         * concatenate them into n.  Every received element is kept — zeros
         * are valid data.
         */
        int indexi = 0;
        for (i = 1; i < numprocsused; i++) {
            unsigned int *recvArray = malloc((size_t)elementQtyArray[i] * sizeof *recvArray);
            if (recvArray == NULL && elementQtyArray[i] > 0) {
                fprintf(stderr, "out of memory\n");
                MPI_Abort(MPI_COMM_WORLD, 1);
            }
            MPI_Recv(recvArray, elementQtyArray[i], MPI_UNSIGNED, i, 2,
                     MPI_COMM_WORLD, &stat);
            for (j = 0; j < (unsigned int)elementQtyArray[i]; j++) {
                n[indexi++] = recvArray[j];
            }
            free(recvArray);
        }

        /* stopping the time */
        endtime = MPI_Wtime();

        /* showing results in array (%u: the values are unsigned) */
        for (c = 0; c < num_element; c++) {
            printf("Hasil Sorting index - %d : %u \n", c, n[c]);
        }
        printf("it took %f seconds \n", endtime - starttime);
        printf("Numbers: %d \n", num_element);
        printf("Processes: %d \n", numprocsused);

        free(n);
        free(elementQtyArray);
    } else {
        /* Worker: receive the bucket, insertion-sort it, send it back. */
        int elementQtyUsed;
        MPI_Recv(&elementQtyUsed, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &stat);
        unsigned int *localArray = malloc((size_t)elementQtyUsed * sizeof *localArray);
        if (localArray == NULL && elementQtyUsed > 0) {
            fprintf(stderr, "out of memory\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        for (li = 0; li < elementQtyUsed; li++) {
            MPI_Recv(&receivedElement, 1, MPI_UNSIGNED, 0, 1, MPI_COMM_WORLD, &stat);
            localArray[li] = receivedElement;
        }
        /* insertion sort */
        for (c = 1; c < elementQtyUsed; c++) {
            unsigned int key = localArray[c];
            d = c;
            while (d > 0 && localArray[d - 1] > key) {
                localArray[d] = localArray[d - 1];
                d--;
            }
            localArray[d] = key;
        }
        MPI_Send(localArray, elementQtyUsed, MPI_UNSIGNED, 0, 2, MPI_COMM_WORLD);
        free(localArray);
    }
    MPI_Finalize();
    return 0;
}
\ No newline at end of file
/*Oleh : Steven Andianto 13513018 */
process 4
n=50000 : 7.642137 secs
n=100000 : 15.345178 secs
n=200000 : 75.477128 secs
n=400000 : 310.792712 secs
process 8
n=50000 : 0.823392 secs
n=100000 : 9.133500 secs
n=200000 : 11.241435 secs
n=400000 : 43.124797 secs
process 16
n=50000 : 1.576477 secs
n=100000 : 4.026215 secs
n=200000 : 5.741722 secs
n=400000 : 24.445183 secs
process 32
n=50000 : 0.590357 secs
n=100000 : 1.472241 secs
n=200000 : 2.470680 secs
n=400000 : 6.619264 secs
Catatan: kelompok beranggotakan satu orang; karena hari ini tidak masuk, pengerjaan tidak dilakukan di labdas.
Menggunakan hasil pengukuran minggu lalu karena minggu ini mengimplementasikan OpenMP.
\ No newline at end of file
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
void Hello(void); /* Thread function */
/*
 * Spawn <thread_count> OpenMP threads (from argv[1]); each one runs Hello().
 * Returns 1 on missing or non-positive thread count instead of dereferencing
 * argv[1] unconditionally (the original crashed when run with no arguments).
 */
int main(int argc, char *argv[]) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <thread_count>\n", argv[0]);
        return 1;
    }
    int thread_count = (int)strtol(argv[1], NULL, 10);
    if (thread_count <= 0) {
        fprintf(stderr, "thread count must be a positive integer\n");
        return 1;
    }
#pragma omp parallel num_threads(thread_count)
    Hello();
    return 0;
}
/* Thread function: print this thread's rank and the size of its team. */
void Hello(void) {
    int rank = omp_get_thread_num();
    int team_size = omp_get_num_threads();
    printf("Hello from thread %d of %d\n", rank, team_size);
}
\ No newline at end of file
# daftar host (host list)
localhost
167.205.35.40