Commit 22f85a33 authored by Angela Lynn's avatar Angela Lynn
coba2

parent 729bc0e6
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include <assert.h>
/* Fill an array with num_elements random values in [0, num_elements). */
int *create_rand_nums(int num_elements) {
    int *rand_nums = (int *)malloc(sizeof(int) * num_elements);
    assert(rand_nums != NULL);
    int i;
    for (i = 0; i < num_elements; i++) {
        rand_nums[i] = rand() % num_elements;
    }
    return rand_nums;
}
int main(int argc, char** argv) {
    if (argc != 2) {
        fprintf(stderr, "Usage: %s num_elements\n", argv[0]);
        return 1;
    }
    srand(time(NULL));

    int i, max = -RAND_MAX, min = RAND_MAX;
    int total_elements = atoi(argv[1]);
    int range;
    int thread_count = omp_get_max_threads();  /* number of threads the runtime will use */
    int position = 0;   /* shared: next write index into rand_nums during write-back */
    int order = 0;      /* shared: rank of the thread whose turn it is to write back */
    double time_start, time_stop;
    int *rand_nums = create_rand_nums(total_elements);
    /* Find min and max so the value range can be split across threads. */
    for (i = 0; i < total_elements; i++) {
        if (rand_nums[i] > max)
            max = rand_nums[i];
        if (rand_nums[i] < min)
            min = rand_nums[i];
    }
    /* Bucket width: ceiling of (max - min) / thread_count. */
    range = (max - min) / thread_count + ((max - min) % thread_count > 0);

    time_start = omp_get_wtime();
    /* Bucketing: each thread collects, sorts, and writes back one bucket. */
    #pragma omp parallel num_threads(thread_count) \
        shared(rand_nums, total_elements, thread_count, range, min, max, position, order)
    {
        int my_rank = omp_get_thread_num();
        int j, k, count = 0;
        int lo = min + my_rank * range;                                 /* this thread keeps values in [lo, hi) */
        int hi = (my_rank == thread_count - 1) ? max + 1 : lo + range;  /* last bucket also takes max itself */
        int *bucket = (int *)malloc(sizeof(int) * total_elements);
        assert(bucket != NULL);

        /* Collect the elements that fall into this thread's value range. */
        for (j = 0; j < total_elements; j++) {
            if (rand_nums[j] >= lo && rand_nums[j] < hi) {
                bucket[count] = rand_nums[j];
                count++;
            }
        }
        /* Sort this thread's bucket with a simple exchange sort. */
        for (k = 0; k < count - 1; k++)
            for (j = k + 1; j < count; j++)
                if (bucket[k] > bucket[j]) {
                    int temp = bucket[k];
                    bucket[k] = bucket[j];
                    bucket[j] = temp;
                }
        /* Wait for this thread's turn so buckets are written back to
           rand_nums in rank order, which yields a fully sorted array. */
        while (order != my_rank) {
            #pragma omp flush(order)
        }
        #pragma omp critical
        {
            for (j = 0; j < count; j++) {
                rand_nums[position] = bucket[j];
                position++;
            }
            order++;
        }
        free(bucket);
    }
    time_stop = omp_get_wtime() - time_start;

    free(rand_nums);
    printf("%d threads, %d elements, %.2lf seconds\n",
           thread_count, total_elements, time_stop);
    return 0;
}
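
Both versions rely on the same ceiling-division bucketing: thread_count (or total_thread) buckets of width range, laid end to end from min, cover every generated value, with the last bucket also catching max itself. A minimal standalone sketch of that arithmetic, using illustrative sample values rather than anything from the programs above:

#include <stdio.h>

int main(void) {
    int min = 0, max = 99, thread_count = 4;   /* illustrative values only */
    /* Same ceiling-division formula as the bucket sort. */
    int range = (max - min) / thread_count + ((max - min) % thread_count > 0);
    int r;

    printf("range = %d\n", range);   /* prints 25 */
    for (r = 0; r < thread_count; r++) {
        int lo = min + r * range;
        int hi = (r == thread_count - 1) ? max + 1 : lo + range;
        /* Thread/bucket r keeps values v with lo <= v < hi. */
        printf("bucket %d: [%d, %d)\n", r, lo, hi);
    }
    return 0;
}

With these numbers the buckets are [0, 25), [25, 50), [50, 75) and [75, 100), so every value between min and max lands in exactly one bucket.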
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include <assert.h>
/* Fill an array with num_elements random values in [0, num_elements). */
int *create_rand_nums(int num_elements) {
    int *rand_nums = (int *)malloc(sizeof(int) * num_elements);
    assert(rand_nums != NULL);
    int i;
    for (i = 0; i < num_elements; i++) {
        rand_nums[i] = rand() % num_elements;
    }
    return rand_nums;
}
int main(int argc, char** argv) {
    if (argc != 3) {
        fprintf(stderr, "Usage: %s num_elements num_threads\n", argv[0]);
        return 1;
    }
    srand(time(NULL));

    int i, j, k, max = -RAND_MAX, min = RAND_MAX;
    int total_elements = atoi(argv[1]);
    int total_thread = atoi(argv[2]);
    int range;
    double time_start, time_stop;

    int *rand_nums = create_rand_nums(total_elements);
    /* One bucket row of total_elements ints per thread, plus per-bucket counts. */
    int *bucket = (int *)malloc(sizeof(int) * total_thread * total_elements);
    int *bucket_count = (int *)malloc(sizeof(int) * total_thread);
    assert(bucket != NULL && bucket_count != NULL);
    /* Find min and max so the value range can be split across buckets. */
    for (i = 0; i < total_elements; i++) {
        if (rand_nums[i] > max)
            max = rand_nums[i];
        if (rand_nums[i] < min)
            min = rand_nums[i];
    }
    /* Bucket width: ceiling of (max - min) / total_thread. */
    range = (max - min) / total_thread + ((max - min) % total_thread > 0);
    time_start = omp_get_wtime();
    /* Bucketing: each iteration fills and sorts one bucket; the iterations
       are independent, so they can run one per thread. */
    #pragma omp parallel for schedule(static) num_threads(total_thread) \
        shared(rand_nums, bucket, bucket_count, range, min, max, total_elements, total_thread) \
        private(j, k)
    for (i = 0; i < total_thread; i++) {
        int *my_bucket = bucket + (long)i * total_elements;       /* this iteration's bucket row */
        int lo = min + i * range;                                 /* bucket i keeps values in [lo, hi) */
        int hi = (i == total_thread - 1) ? max + 1 : lo + range;  /* last bucket also takes max itself */
        k = 0;
        for (j = 0; j < total_elements; j++) {
            if (rand_nums[j] >= lo && rand_nums[j] < hi) {
                my_bucket[k] = rand_nums[j];
                k++;
            }
        }
        bucket_count[i] = k;
        /* Sort this bucket with a simple exchange sort. */
        for (k = 0; k < bucket_count[i] - 1; k++)
            for (j = k + 1; j < bucket_count[i]; j++)
                if (my_bucket[k] > my_bucket[j]) {
                    int temp = my_bucket[k];
                    my_bucket[k] = my_bucket[j];
                    my_bucket[j] = temp;
                }
    }
    /* Merge: buckets cover increasing value ranges, so concatenating them
       in index order gives the fully sorted array. */
    k = 0;
    for (i = 0; i < total_thread; i++) {
        for (j = 0; j < bucket_count[i]; j++) {
            rand_nums[k] = bucket[(long)i * total_elements + j];
            k++;
        }
    }
    time_stop = omp_get_wtime() - time_start;

    free(bucket);
    free(bucket_count);
    free(rand_nums);
    printf("%d threads, %d elements, %.2lf seconds\n",
           total_thread, total_elements, time_stop);
    return 0;
}
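
Neither version checks its own output, and rand_nums is freed before the final printf, so any verification has to run right after the merge. A minimal sketch of such a check, with a hypothetical is_sorted helper and made-up sample arrays rather than the programs' data:

#include <stdio.h>

/* Returns 1 if arr[0..n) is in non-decreasing order, 0 otherwise. */
static int is_sorted(const int *arr, int n) {
    int i;
    for (i = 1; i < n; i++) {
        if (arr[i - 1] > arr[i])
            return 0;   /* found an out-of-order pair */
    }
    return 1;
}

int main(void) {
    int ok[]  = {1, 2, 2, 5, 9};
    int bad[] = {3, 1, 4};
    printf("ok:  %d\n", is_sorted(ok, 5));    /* prints 1 */
    printf("bad: %d\n", is_sorted(bad, 3));   /* prints 0 */
    return 0;
}

Calling is_sorted(rand_nums, total_elements) between the merge loop and free(rand_nums) would confirm that the bucketed result is actually sorted.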