diff --git a/a b/a new file mode 100755 index 0000000000000000000000000000000000000000..530e3c15ceeaea09bb140e8f812cefd9353a441b Binary files /dev/null and b/a differ diff --git a/broadcast.c b/broadcast.c new file mode 100644 index 0000000000000000000000000000000000000000..7eabc79ff16621dcd6233be7ba4d95a731ab80ef --- /dev/null +++ b/broadcast.c @@ -0,0 +1,71 @@ +#include <stdio.h> +#include <stdlib.h> +#include <mpi.h> +#include <assert.h> + +void my_bcast(void* data, int count, MPI_Datatype datatype, int root, MPI_Comm communicator) { + int world_rank; + MPI_Comm_rank(communicator, &world_rank); + int world_size; + MPI_Comm_size(communicator, &world_size); + + if (world_rank == root) { + // If we are the root process, send our data to everyone + int i; + for (i = 0; i < world_size; i++) { + if (i != world_rank) { + MPI_Send(data, count, datatype, i, 0, communicator); + } + } + } else { + // If we are a receiver process, receive the data from the root + MPI_Recv(data, count, datatype, root, 0, communicator, MPI_STATUS_IGNORE); + } +} + +int main(int argc, char** argv) { + if (argc != 3) { + fprintf(stderr, "Usage: compare_bcast num_elements num_trials\n"); + exit(1); + } + + int num_elements = atoi(argv[1]); + int num_trials = atoi(argv[2]); + + MPI_Init(NULL, NULL); + + int world_rank; + MPI_Comm_rank(MPI_COMM_WORLD, &world_rank); + + double total_my_bcast_time = 0.0; + double total_mpi_bcast_time = 0.0; + int i; + int* data = (int*)malloc(sizeof(int) * num_elements); + assert(data != NULL); + + for (i = 0; i < num_trials; i++) { + // Time my_bcast + // Synchronize before starting timing + MPI_Barrier(MPI_COMM_WORLD); + total_my_bcast_time -= MPI_Wtime(); /* FIX: was '=', which discarded previous trials; subtract the start stamp */ + my_bcast(data, num_elements, MPI_INT, 0, MPI_COMM_WORLD); + // Synchronize again before obtaining final time + MPI_Barrier(MPI_COMM_WORLD); + total_my_bcast_time += MPI_Wtime(); /* FIX: was '=', which left a raw timestamp instead of accumulating elapsed time */ + // Time MPI_Bcast + MPI_Barrier(MPI_COMM_WORLD); + total_mpi_bcast_time -= MPI_Wtime(); /* FIX: was '='; pairs with the '+=' after the barrier that follows the bcast */ + MPI_Bcast(data, num_elements, MPI_INT, 0, 
MPI_COMM_WORLD); + MPI_Barrier(MPI_COMM_WORLD); + total_mpi_bcast_time += MPI_Wtime(); + } + + // Print off timing information + if (world_rank == 0) { + printf("Data size = %d, Trials = %d\n", num_elements * (int)sizeof(int), num_trials); + printf("Avg my_bcast time = %lf\n", total_my_bcast_time / num_trials); + printf("Avg MPI_Bcast time = %lf\n", total_mpi_bcast_time / num_trials); + } + + MPI_Finalize(); /* NOTE(review): 'data' (malloc'd above) is never freed -- harmless at process exit, but a reported leak under valgrind */ +} diff --git a/mpi_hostfile b/mpi_hostfile new file mode 100644 index 0000000000000000000000000000000000000000..bffbf8847866f00c5ba8aefc9d5856feb4a31cb3 --- /dev/null +++ b/mpi_hostfile @@ -0,0 +1,11 @@ +#daftar host (Indonesian: "list of hosts") +localhost +167.205.35.26 +167.205.35.28 +167.205.35.29 +167.205.35.30 +167.205.35.31 +#167.205.35.32 +#167.205.35.33 +#167.205.35.34 + diff --git a/scatgat.c b/scatgat.c new file mode 100644 index 0000000000000000000000000000000000000000..2f3719a3724af395077229316a9756b60a74ff61 --- /dev/null +++ b/scatgat.c @@ -0,0 +1,76 @@ +#include <stdio.h> +#include <stdlib.h> +#include <time.h> +#include <mpi.h> +#include <assert.h> + +float *create_consecutive(int num_elements) { /* returns a malloc'd array {1,2,...,num_elements}; caller frees */ + float *rand_nums = (float *)malloc(sizeof(float) * num_elements); + assert(rand_nums != NULL); + int i; + for (i = 0; i < num_elements; i++) { + rand_nums[i] = i+1; + } + return rand_nums; +} + +float compute_avg(float *array, int num_elements) { /* arithmetic mean; assumes num_elements > 0 */ + float sum = 0.f; + int i; + for (i = 0; i < num_elements; i++) { + sum += array[i]; + } + return sum / num_elements; +} + +int main(int argc, char** argv) { + if (argc != 2) { + fprintf(stderr, "Usage: avg num_elements_per_proc\n"); + exit(1); + } + + int num_elements_per_proc = atoi(argv[1]); + srand(time(NULL)); /* NOTE(review): unused -- this file never calls rand() */ + + MPI_Init(NULL, NULL); + + int world_rank; + MPI_Comm_rank(MPI_COMM_WORLD, &world_rank); + int world_size; + MPI_Comm_size(MPI_COMM_WORLD, &world_size); + + float *rand_nums = NULL; /* allocated on root only; stays NULL elsewhere */ + if (world_rank == 0) { + rand_nums = create_consecutive(num_elements_per_proc * world_size); + } + + float *sub_rand_nums = (float 
*)malloc(sizeof(float) * num_elements_per_proc); + assert(sub_rand_nums != NULL); + + MPI_Scatter(rand_nums, num_elements_per_proc, MPI_FLOAT, sub_rand_nums, num_elements_per_proc, MPI_FLOAT, 0, MPI_COMM_WORLD); /* each rank receives one equal-sized chunk; the send buffer is only significant on root */ + + float sub_avg = compute_avg(sub_rand_nums, num_elements_per_proc); /* local partial average */ + + float *sub_avgs = NULL; + if (world_rank == 0) { + sub_avgs = (float *)malloc(sizeof(float) * world_size); + assert(sub_avgs != NULL); + } + + MPI_Gather(&sub_avg, 1, MPI_FLOAT, sub_avgs, 1, MPI_FLOAT, 0, MPI_COMM_WORLD); /* root collects one partial average per rank */ + + if (world_rank == 0) { + float avg = compute_avg(sub_avgs, world_size); /* mean of equal-sized chunk means == global mean */ + printf("Avg of all elements is %f\n", avg); + } + + if (world_rank == 0) { + free(rand_nums); + free(sub_avgs); + } + + free(sub_rand_nums); + + MPI_Barrier(MPI_COMM_WORLD); + MPI_Finalize(); +} diff --git a/sendrec.c b/sendrec.c new file mode 100644 index 0000000000000000000000000000000000000000..59046f136de2247eb4fc7ec15dcbcab422c052bc --- /dev/null +++ b/sendrec.c @@ -0,0 +1,30 @@ +#include <mpi.h> +#include <stdio.h> + +int main(int argc, char *argv[]) { + int numtasks, rank, dest, source, rc, count, tag=1; /* rc is assigned but never checked */ + char inmsg, outmsg='x'; + MPI_Status Stat; + + MPI_Init(&argc,&argv); + MPI_Comm_size(MPI_COMM_WORLD, &numtasks); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + if (rank == 0) { + dest = 1; + source = 1; + rc = MPI_Send(&outmsg, 1, MPI_CHAR, dest, tag, MPI_COMM_WORLD); + rc = MPI_Recv(&inmsg, 1, MPI_CHAR, source, tag, MPI_COMM_WORLD, &Stat); + } + else if (rank == 1) { + dest = 0; + source = 0; + rc = MPI_Recv(&inmsg, 1, MPI_CHAR, source, tag, MPI_COMM_WORLD, &Stat); + rc = MPI_Send(&outmsg, 1, MPI_CHAR, dest, tag, MPI_COMM_WORLD); + } + + rc = MPI_Get_count(&Stat, MPI_CHAR, &count); /* NOTE(review): if launched with more than 2 tasks, ranks >= 2 reach here with 'Stat' uninitialized -- assumes a 2-task run; confirm */ + printf("Task %d: Received %d char(s) from task %d with tag %d \n", rank, count, Stat.MPI_SOURCE, Stat.MPI_TAG); + + MPI_Finalize(); +} diff --git a/tes.c b/tes.c new file mode 100644 index 0000000000000000000000000000000000000000..d1f860b069e84bc48592ae1af0380acdafa5a351 --- /dev/null +++ b/tes.c @@ 
-0,0 +1,99 @@ +#include <stdio.h> +#include <stdlib.h> +#include <time.h> +#include <mpi.h> +#include <assert.h> + +float *create_rand_nums(int num_elements) { /* malloc'd array of uniform [0,1) floats; caller frees */ + float *rand_nums = (float *)malloc(sizeof(float) * num_elements); + assert(rand_nums != NULL); + int i; + for (i = 0; i < num_elements; i++) { + rand_nums[i] = (rand() / (float)RAND_MAX); + } + return rand_nums; +} + +// n is the number of elements; sorts 'array' ascending in place and returns it +float *insertion_sort(float array[], int n) { + float t; + int d,c; + for (c = 1 ; c <= n - 1; c++) { + d = c; + + while ( d > 0 && array[d] < array[d-1]) { + t = array[d]; + array[d] = array[d-1]; + array[d-1] = t; + + d--; + } + } + + /* FIX: the swap temp was 'int', truncating float values while sorting. */ + /* FIX: the old code copied the result into a local VLA and returned it -- */ + /* returning the address of automatic storage is UB (dangling pointer). */ + return array; +} + +int main(int argc, char** argv) { + if (argc != 2) { + fprintf(stderr, "Usage: avg num_elements_per_proc\n"); + exit(1); + } + + int num_elements = atoi(argv[1]); + srand(time(NULL)); + + MPI_Init(NULL, NULL); + + int world_rank; + MPI_Comm_rank(MPI_COMM_WORLD, &world_rank); + int world_size; + MPI_Comm_size(MPI_COMM_WORLD, &world_size); + + int num_elements_per_proc = world_size; + + float *rand_nums = NULL; + if (world_rank == 0) { + rand_nums = create_rand_nums(num_elements); + } + int i; + if (world_rank == 0) for (i = 0; i < num_elements; i++) { /* FIX: only root owns rand_nums; other ranks held NULL and crashed here */ + printf("BBB %f \n", rand_nums[i]); + } + + float *sub_rand_nums = (float *)malloc(sizeof(float) * num_elements_per_proc); + assert(sub_rand_nums != NULL); + + MPI_Scatter(rand_nums, num_elements_per_proc, MPI_FLOAT, sub_rand_nums, num_elements_per_proc, MPI_FLOAT, 0, MPI_COMM_WORLD); /* NOTE(review): root sends world_size*num_elements_per_proc = world_size^2 floats; confirm num_elements >= world_size^2 or this over-reads rand_nums */ + + float *sub_rand_numss = insertion_sort(sub_rand_nums, num_elements_per_proc); + + float *sub_avgs = NULL; + if (world_rank == 0) { + sub_avgs = (float *)malloc(sizeof(float) * num_elements); + assert(sub_avgs != NULL); + } + + for (i = 0; i < 3 && i < num_elements_per_proc; i++) { /* FIX: bound by chunk size so small runs don't read past the buffer */ + printf("AAAA %f\n",sub_rand_numss[i]); + } + //MPI_Gather(&sub_avg, 1, MPI_FLOAT, sub_avgs, 1, MPI_FLOAT, 0, MPI_COMM_WORLD); + + //Concat + /*if (world_rank == 0) { + float avg 
= compute_avg(sub_avgs, world_size); + printf("Avg of all elements is %f\n", avg); + } */ + + if (world_rank == 0) { /* only root allocated these buffers */ + free(rand_nums); + free(sub_avgs); + } + + free(sub_rand_nums); /* every rank owns its own scatter chunk */ + + MPI_Barrier(MPI_COMM_WORLD); /* synchronize before shutdown (MPI_Finalize itself is collective) */ + MPI_Finalize(); +}