diff --git a/bucket b/bucket
new file mode 100755
index 0000000000000000000000000000000000000000..6c0a534a1f9ffbf8abe87342dc1171f7abdf44fe
Binary files /dev/null and b/bucket differ
diff --git a/bucket.c b/bucket.c
new file mode 100644
index 0000000000000000000000000000000000000000..33448cc8fe31cb4010c16484fd3d0a7801354088
--- /dev/null
+++ b/bucket.c
@@ -0,0 +1,154 @@
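+/*
+ * Parallel bucket sort with MPI.
+ * Rank 0 reads the problem size, generates random numbers, splits the value
+ * range into numprocsused-1 equal-width buckets and sends each bucket to a
+ * worker rank. Every worker insertion-sorts its bucket and sends it back;
+ * rank 0 then concatenates the buckets in rank order into the sorted array.
+ */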
+#include "mpi.h"
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <time.h>
+#include <assert.h>
+ 
+int main( int argc, char* argv[] )
+{
+    double starttime, endtime;
+    int proceso_id;
+    int num_element;
+    int m;
+    int i,d,li,c,t;
+    unsigned int j;
+    char processor_name[MPI_MAX_PROCESSOR_NAME];
+    int namelen;
+    int numprocsused;
+    // Initiating the parallel part
+    MPI_Status stat;
+    MPI_Init(NULL,NULL);
+        
+        MPI_Comm_size(MPI_COMM_WORLD, &numprocsused);
+        MPI_Comm_rank(MPI_COMM_WORLD,&proceso_id);
+        MPI_Get_processor_name(processor_name, &namelen);
+        unsigned int receivedElement;
+        if(proceso_id == 0) {
+            // main process: read the problem size and generate random input
+            printf("Enter number of elements\n");
+            scanf("%d", &num_element);
+            srand(time(NULL));   // seed the generator (time.h is included for this)
+            unsigned int *n = malloc(num_element * sizeof(unsigned int));
+            for (i = 0; i < num_element; i++){
+                n[i] = rand() % num_element;
+            }
+ 
+            // start timing the sort
+            starttime = MPI_Wtime();
+ 
+            // find the minimum and maximum values
+            unsigned int min = n[0];
+            unsigned int max = n[0];
+            for(i=0; i < num_element; i++) {
+                if(n[i] < min) { min = n[i]; }
+                if(n[i] > max) { max = n[i]; }
+            }
+ 
+            // calculate how many numbers each bucket/process will receive
+            int *elementQtyArray = malloc (sizeof(int) * numprocsused);
+            // default values
+            for(d=0; d < numprocsused; d++) {
+                elementQtyArray[d] = 0;
+            }
+            // width of each bucket's value range; the last bucket also takes the remainder
+            int increaseOf = max/(numprocsused-1);
+            if (increaseOf == 0) { increaseOf = 1; }
+            for(d=0; d < num_element; d++) {
+                int iteration = 1;
+                int pridetas = 0;
+                for(j = increaseOf; iteration < numprocsused-1; j = j + increaseOf) {
+                    if(n[d] <= j) {
+                        elementQtyArray[iteration]++;
+                        pridetas = 1;
+                        break;
+                    }
+                    iteration++;
+                }
+                // anything above the last boundary goes to the last bucket
+                if (pridetas == 0) {
+                    elementQtyArray[numprocsused-1]++;
+                }
+            }
+ 
+            // send each process/bucket the number of elements it will receive
+            for(i=1; i<numprocsused; i++) {
+                MPI_Send(&elementQtyArray[i], 1, MPI_INT, i, 0, MPI_COMM_WORLD);
+            }
+
+            // do the same again, this time sending the numbers themselves
+            for(d=0; d < num_element; d++) {
+                int iteration = 1;
+                int issiunte = 0;
+                for (j = increaseOf; iteration < numprocsused-1; j = j + increaseOf) {
+                    if(n[d] <= j) {
+                        MPI_Send(&n[d], 1, MPI_UNSIGNED, iteration, 1, MPI_COMM_WORLD);
+                        issiunte = 1;
+                        break;
+                    }
+                    iteration++;
+                }
+                // anything above the last boundary goes to the last bucket
+                if (issiunte == 0) {
+                    MPI_Send(&n[d], 1, MPI_UNSIGNED, numprocsused-1, 1, MPI_COMM_WORLD);
+                }
+            }
+            // Getting back the sorted buckets and merging them into one array;
+            // buckets arrive in rank order, so concatenating them yields the sorted result
+            int indexi = 0;
+            for(i=1; i < numprocsused; i++) {
+                unsigned int *recvArray = malloc(sizeof(unsigned int) * elementQtyArray[i]);
+                MPI_Recv(recvArray, elementQtyArray[i], MPI_UNSIGNED, i, 2, MPI_COMM_WORLD, &stat);
+                for(j=0; j < (unsigned int)elementQtyArray[i]; j++) {
+                    n[indexi] = recvArray[j];
+                    indexi++;
+                }
+                free(recvArray);
+            }
+ 
+
+            // stopping the timer
+            endtime = MPI_Wtime();
+
+            // showing the sorted array
+            for(c = 0; c < num_element; c++){
+                printf("Sorting result index - %d : %u \n", c, n[c]);
+            }
+            // timing and run configuration
+            printf("it took %f seconds \n", endtime-starttime);
+            printf("Numbers: %d \n", num_element);
+            printf("Processes:  %d \n", numprocsused);
+            free(elementQtyArray);
+            free(n);
+        } else {
+            // worker process: receive this bucket's element count, then its elements
+            int elementQtyUsed;
+            MPI_Recv(&elementQtyUsed, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &stat);
+            unsigned int *localArray = malloc(sizeof(unsigned int) * elementQtyUsed);
+            for (li = 0; li < elementQtyUsed; li++){
+                MPI_Recv(&receivedElement, 1, MPI_UNSIGNED, 0, 1, MPI_COMM_WORLD, &stat);
+                localArray[li] = receivedElement;
+            }
+            // insertion sort of the local bucket
+            for (c = 1; c <= elementQtyUsed - 1; c++){
+                d = c;
+                while (d > 0 && localArray[d] < localArray[d-1]){
+                    t = localArray[d];
+                    localArray[d] = localArray[d-1];
+                    localArray[d-1] = t;
+                    d--;
+                }
+            }
+            // send the sorted bucket back to the main process
+            MPI_Send(localArray, elementQtyUsed, MPI_UNSIGNED, 0, 2, MPI_COMM_WORLD);
+            free(localArray);
+        }
+
+    MPI_Finalize();
+    return 0;
+}
+
+
diff --git a/laporan_bucket.txt b/laporan_bucket.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f71ba67a12593b7d7d529caee85bf121163f7e5c
--- /dev/null
+++ b/laporan_bucket.txt
@@ -0,0 +1,24 @@
+/* By: Steven Andianto 13513018 and Luminto 13513080 */
+4 processes
+n=50000 	: 7.245877 secs
+n=100000 	: 12.454970 secs
+n=200000 	: 71.536755 secs
+n=400000 	: 304.791689 secs
+
+8 processes
+n=50000  	: 0.823392 secs
+n=100000 	: 9.133500 secs
+n=200000 	: 11.241435 secs
+n=400000 	: 43.124797 secs
+
+16 processes
+n=50000  	: 1.576477 secs
+n=100000 	: 4.026215 secs
+n=200000 	: 5.741722 secs
+n=400000 	: 24.445183 secs
+
+32 processes
+n=50000  	: 0.590357 secs
+n=100000 	: 1.472241 secs
+n=200000 	: 2.470680 secs
+n=400000 	: 6.619264 secs
diff --git a/mpi_hostfile b/mpi_hostfile
new file mode 100644
index 0000000000000000000000000000000000000000..820eec6734fb3e7fc699d427076dbf7df8f7c7a1
--- /dev/null
+++ b/mpi_hostfile
@@ -0,0 +1,7 @@
+# host list
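+# example (assuming Open MPI): mpirun -np 8 --hostfile mpi_hostfile ./bucket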
+localhost 
+167.205.35.26 
+167.205.35.28 
+167.205.35.29 
+167.205.35.30 
+167.205.35.31
diff --git a/scattergather b/scattergather
new file mode 100755
index 0000000000000000000000000000000000000000..6d2b63c681c93baf2dcdbbabba7eb22906ad9ddb
Binary files /dev/null and b/scattergather differ
diff --git a/scattergather.c b/scattergather.c
new file mode 100644
index 0000000000000000000000000000000000000000..d98db5cd3479f4be38db71e5f9d9a05e1c33b603
--- /dev/null
+++ b/scattergather.c
@@ -0,0 +1,78 @@
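+// Average of random numbers with MPI_Scatter/MPI_Gather: the root scatters
+// equal-sized chunks, every rank averages its chunk, and the root gathers the
+// partial averages and averages those (valid because all chunks are the same
+// size). Example run: mpirun -np 4 ./scattergather 1000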
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <mpi.h>
+#include <assert.h>
+
+// Create an array of random floats in [0, 1]
+float *create_rand_nums(int num_elements) {
+    float *rand_nums = (float *)malloc(sizeof(float) * num_elements);
+    assert(rand_nums != NULL);
+    int i;
+    for (i = 0; i < num_elements; i++) {
+        rand_nums[i] = (rand() / (float)RAND_MAX);
+    }
+    return rand_nums;
+}
+
+// Compute the average of an array of floats
+float compute_avg(float *array, int num_elements) {
+    float sum = 0.f;
+    int i;
+    for (i = 0; i < num_elements; i++) {
+        sum += array[i];
+    }
+    return sum / num_elements;
+}
+
+int main(int argc, char** argv) {
+    if (argc != 2) {
+        fprintf(stderr, "Usage: avg num_elements_per_proc\n");
+        exit(1);
+    }
+
+    int num_elements_per_proc = atoi(argv[1]);
+    srand(time(NULL));
+
+    MPI_Init(NULL, NULL);
+
+    int world_rank;
+    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
+    int world_size;
+    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
+
+    // The root generates the full array of random numbers
+    float *rand_nums = NULL;
+    if (world_rank == 0) {
+        rand_nums = create_rand_nums(num_elements_per_proc * world_size);
+    }
+
+    // Buffer for each process's share of the numbers
+    float *sub_rand_nums = (float *)malloc(sizeof(float) * num_elements_per_proc);
+    assert(sub_rand_nums != NULL);
+
+    // Scatter equal chunks from the root to all processes
+    MPI_Scatter(rand_nums, num_elements_per_proc, MPI_FLOAT, sub_rand_nums,
+                num_elements_per_proc, MPI_FLOAT, 0, MPI_COMM_WORLD);
+
+    // Each process computes the average of its chunk
+    float sub_avg = compute_avg(sub_rand_nums, num_elements_per_proc);
+
+    // Gather the partial averages on the root
+    float *sub_avgs = NULL;
+    if (world_rank == 0) {
+        sub_avgs = (float *)malloc(sizeof(float) * world_size);
+        assert(sub_avgs != NULL);
+    }
+    MPI_Gather(&sub_avg, 1, MPI_FLOAT, sub_avgs, 1, MPI_FLOAT, 0,
+               MPI_COMM_WORLD);
+
+    // The average of the per-process averages equals the global average
+    // because every process handles the same number of elements
+    if (world_rank == 0) {
+        float avg = compute_avg(sub_avgs, world_size);
+        printf("Avg of all elements is %f\n", avg);
+    }
+
+    if (world_rank == 0) {
+        free(rand_nums);
+        free(sub_avgs);
+    }
+    free(sub_rand_nums);
+
+    MPI_Barrier(MPI_COMM_WORLD);
+    MPI_Finalize();
+    return 0;
+}
+