diff --git a/src/par b/src/par
index 8bf4ea24047dd88c1e67dfcc6830ff4f8b2f82c4..ffafc83fe9e030b3ca6386844af9bd6f87ef3bf2 100644
Binary files a/src/par and b/src/par differ
diff --git a/src/parallel.c b/src/parallel.c
index ebf45623419aa2dcef6a04ae234c9d45ce7ac9fb..2f3cec54697f6cdfe811a0c627be4bfefcea53f2 100644
--- a/src/parallel.c
+++ b/src/parallel.c
@@ -289,6 +289,8 @@ int main() {
 	
     // 10 / 4 = 2 2 2 (10-6)
     int kernel_row, kernel_col, target_row, target_col, num_targets;
+	int counter_distribute = 0;
+	int counter_receiver = 0;
 	
 	// reads kernel's row and column and initalize kernel matrix from input
 	scanf("%d %d", &kernel_row, &kernel_col);
@@ -299,7 +301,7 @@ int main() {
 	// initialize array of matrices and array of data ranges (int)
 	scanf("%d %d %d", &num_targets, &target_row, &target_col);
 	Matrix* arr_mat = (Matrix*)malloc(num_targets * sizeof(Matrix));
-	Matrix* arr_mat2 = (Matrix*)malloc(num_targets * sizeof(Matrix));
+
 	int arr_range[num_targets];
     
 	
@@ -310,8 +312,7 @@ int main() {
 
     print_matrix(&kernel);
 
-	int counter_distribute = 0;
-	int counter_receiver = 0;
+	
 
     printf("HELLO OPEN MPI\n");
 
@@ -354,77 +355,206 @@ int main() {
 
     6 7 8
 
-    9
+    9 10 11
 
     */ 
 
-    int root = 0;
+	int root = 0;
+	int divide = num_targets/(world_size-1);
+
+	if (world_rank == root){
+		// Divide input
+		int i;
+		int j = 0;
+		// int k = 0;
+		int sisa = num_targets % (world_size-1);
+		for (int rank = 0; rank < world_size; rank++) {
+            printf("I = %d\n", rank);
+			if (rank != world_rank) {
+				int jumlah_diambil = divide;
+				printf("jumlah diambil pre-sisa: %d\n", jumlah_diambil);
+				if (sisa != 0){
+					jumlah_diambil += 1;
+					sisa -= 1;
+				}
+				printf("jumlah diambil post-sisa: %d\n", jumlah_diambil);
+
+				MPI_Send(&kernel, 1, mat_MPI, rank, 0, MPI_COMM_WORLD);
+				MPI_Send(&jumlah_diambil, 1, MPI_INT, rank, 1, MPI_COMM_WORLD);
+				
+				for (int k=0; k < jumlah_diambil; k++){
+					MPI_Send(&arr_mat[j], 1, mat_MPI, rank, 2+k, MPI_COMM_WORLD);
+					j++;
+				}
+				
+				printf("Checkpoint A\n");
+			}
+		}
+
+
+		printf("BACK TO PROCESS 0");
+		// Now receive the message with the allocated buffer
+		int idxArrRange = 0;
+        for (int i = 0; i < world_size; i++ ){
+			if (i != root){
+                MPI_Status status;
+                // Probe for an incoming message from process zero
+                MPI_Probe(i, 0, MPI_COMM_WORLD, &status);
 
+                // When probe returns, the status object has the size and other
+                // attributes of the incoming message. Get the message size
+                int number_amount;
+                MPI_Get_count(&status, MPI_INT, &number_amount);
 
+                // Allocate a buffer to hold the incoming numbers
+                int* number_buf = (int*)malloc(sizeof(int) * number_amount);
 
-    while (counter_distribute < PING_PONG_LIMIT){
-        // distribute(arr_mat[counter_distribute],1,mat_MPI,0, MPI_COMM_WORLD)
-		printf("Counter dis %d \n", counter_distribute);
-		if (world_rank == root) {
-			// If we are the root process, send our data to every process
-            int i;
-            for (i = 0; i < world_size; i++) {
-                if (i != world_rank) {
-                    MPI_Send(&arr_mat[counter_distribute], 1, mat_MPI, i, 1, MPI_COMM_WORLD);
-                    MPI_Send(&kernel, 1, mat_MPI, i, 0, MPI_COMM_WORLD);
-                    counter_distribute++;
-                }
+
+                MPI_Recv(number_buf, number_amount, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+                printf("\nPROCESS 0 IN RECEIVING\n");
+                print_array(number_buf,number_amount);
+                //[3]
+                //[46]
+                //[22]
+
+				for (int j=0; j<number_amount; j++){
+					arr_range[idxArrRange] = number_buf[j];
+					idxArrRange++;
+				}
             }
+		}
+	} else {	
+		Matrix kernel_recv;
+		MPI_Recv(&kernel_recv, 1, mat_MPI, root, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+
+        int totalData;
+		MPI_Recv(&totalData, 1, MPI_INT, root, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+		printf("Total datanya %d \n", totalData);
+
+		print_matrix(&kernel_recv);
+
+		Matrix* arr_mat2 = (Matrix*)malloc(totalData * sizeof(Matrix));
+
+		for (int i=0; i<totalData; i++){
+            Matrix mat_recv;
+            MPI_Recv(&mat_recv, 1, mat_MPI, root, i+2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+			printf("Hasil passing\n");
+			print_matrix(&mat_recv);
+            
+            arr_mat2[i] = convolution(&kernel_recv, &mat_recv);
+            
+            print_matrix(&arr_mat2[i]);
+			arr_range[i] = get_matrix_datarange(&arr_mat2[i]);
+		}
+
+		printf("HASILLL RANGE\n");
+		merge_sort(arr_range, 0, totalData-1);
+		print_array(arr_range,totalData);
+
+		MPI_Send(&arr_range, totalData, MPI_INT, 0, 0, MPI_COMM_WORLD);
+	}
+
+
+    // if (world_rank == 0){
+	// 	printf("BACK TO PROCESS 0");
+	// 	// Now receive the message with the allocated buffer
+    //     for (int i = 0; i < world_size; i++ ){
+	// 		if (i != root){
+    //             MPI_Status status;
+    //             // Probe for an incoming message from process zero
+    //             MPI_Probe(i, 0, MPI_COMM_WORLD, &status);
+
+    //             // When probe returns, the status object has the size and other
+    //             // attributes of the incoming message. Get the message size
+    //             int number_amount;
+    //             MPI_Get_count(&status, MPI_INT, &number_amount);
+
+    //             // Allocate a buffer to hold the incoming numbers
+    //             int* number_buf = (int*)malloc(sizeof(int) * number_amount);
+
+
+    //             MPI_Recv(number_buf, number_amount, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+    //             printf("PROCESS 0 IN RECEIVING");
+    //             print_array(number_buf,number_amount);
+    //         }
+	// 	}
+    // }
+
+
+    // while (counter_distribute < PING_PONG_LIMIT){
+    //     // distribute(arr_mat[counter_distribute],1,mat_MPI,0, MPI_COMM_WORLD)
+	// 	printf("Counter dis %d \n", counter_distribute);
+	// 	if (world_rank == root) {
+	// 		// If we are the root process, send our data to every process
+    //         int i;
+    //         for (i = 0; i < world_size; i++) {
+    //             if (i != world_rank) {
+    //                 MPI_Send(&arr_mat[counter_distribute], 1, mat_MPI, i, 1, MPI_COMM_WORLD);
+    //                 MPI_Send(&kernel, 1, mat_MPI, i, 0, MPI_COMM_WORLD);
+                    
+    //                 counter_distribute++;
+
+    //                 MPI_Send(&counter_distribute, 1, MPI_INT, i, 2, MPI_COMM_WORLD);
+    //             }
+    //         }
 			
-		} else {
-			// If we are a receiver process, receive the data from the root
-            Matrix recv_data;
-            MPI_Recv(&recv_data, 1, mat_MPI, root, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            Matrix kernel_recv;
-            MPI_Recv(&kernel_recv, 1, mat_MPI, root, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            print_matrix(&recv_data);
-            print_matrix(&kernel_recv);
-
-			printf("HASILLL\n");
-			arr_mat2[counter_receiver] = convolution(&kernel_recv, &recv_data);
+	// 	} else {
+	// 		// If we are a receiver process, receive the data from the root
+    //         Matrix recv_data;
+    //         MPI_Recv(&recv_data, 1, mat_MPI, root, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+    //         Matrix kernel_recv;
+    //         MPI_Recv(&kernel_recv, 1, mat_MPI, root, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+    //         print_matrix(&recv_data);
+    //         print_matrix(&kernel_recv);
+    //         MPI_Recv(&counter_distribute, 1, mat_MPI, root, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+	// 		printf("HASILLL\n");
+	// 		arr_mat2[counter_receiver] = convolution(&kernel_recv, &recv_data);
             
-            print_matrix(&arr_mat2[counter_receiver]);
-			arr_range[counter_receiver] = get_matrix_datarange(&arr_mat2[counter_receiver]);
+    //         print_matrix(&arr_mat2[counter_receiver]);
+	// 		arr_range[counter_receiver] = get_matrix_datarange(&arr_mat2[counter_receiver]);
+    //         counter_distribute = counter_distribute;
+	// 		printf("Counter reciver now %d \n", counter_distribute);
+    //         printf("Counter disribute now %d \n", counter_distribute);
+            
+	// 		counter_receiver++;
+	// 	}
+    // }
 
-			counter_receiver++;
-		}
-    }
+    printf("HOOOOOOOOOOOOOOO\n");
+	
 
-	MPI_Finalize();
-    
+    MPI_Finalize();
 
-    // // Print hasil
+	// print_array(arr_range,num_targets);
+    // Print hasil
 	// for (int i = 0; i < num_targets; i++) {
 	// 	printf("\nMATRIX CONV %d",i);
 	// 	print_matrix(arr_mat2); 
 	// }
 
-	// // sort the data range array
-	// printf("\n");
-	// print_array(arr_range,num_targets);
-	// merge_sort(arr_range, 0, num_targets - 1);
+	// sort the data range array
+	printf("BAWAHNYA PRINT ARRAY\n");
+	print_array(arr_range,num_targets);
+	merge_sort(arr_range, 0, num_targets - 1);
+
+
+    printf("AFTER SORT\n");
+	print_array(arr_range,num_targets);
 	
-	// int median = get_median(arr_range, num_targets);	
-	// int floored_mean = get_floored_mean(arr_range, num_targets); 
+	int median = get_median(arr_range, num_targets);	
+	int floored_mean = get_floored_mean(arr_range, num_targets); 
 
-	// // print the min, max, median, and floored mean of data range array
-	// printf("%d\n%d\n%d\n%d\n", 
-	// 		arr_range[0], 
-	// 		arr_range[num_targets - 1], 
-	// 		median, 
-	// 		floored_mean);
+	// print the min, max, median, and floored mean of data range array
+	printf("HASIL FINALLLL\n");
+	printf("MIN : %d\nMAX : %d\nMedian : %d\nRata-Rata : %d\n", 
+			arr_range[0], 
+			arr_range[num_targets - 1], 
+			median, 
+			floored_mean);
 
     
 	
-    // START OPEN MP	
-	
-	
-    
+    // START OPEN MP
 	
 	return 0;
 }
diff --git a/src/parallel2.c b/src/parallel2.c
new file mode 100644
index 0000000000000000000000000000000000000000..f4ecd9bb66fabca6b1aa183989ef04f63b6a3882
--- /dev/null
+++ b/src/parallel2.c
@@ -0,0 +1,476 @@
+// serial.c
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <mpi.h>
+
+#define NMAX 100
+#define DATAMAX 1000
+#define DATAMIN -1000
+
+/* 
+ * Struct Matrix
+ *
+ * Matrix representation consists of matrix data 
+ * and effective dimensions 
+ * */
+typedef struct Matrix {
+	int mat[NMAX][NMAX];	// Matrix cells
+	int row_eff;			// Matrix effective row
+	int col_eff;			// Matrix effective column
+} Matrix;
+
+
+/* 
+ * Procedure init_matrix
+ * 
+ * Initializing newly allocated matrix
+ * Setting all data to 0 and effective dimensions according
+ * to nrow and ncol 
+ * */
+void init_matrix(Matrix *m, int nrow, int ncol) {
+	m->row_eff = nrow;
+	m->col_eff = ncol;
+
+	for (int i = 0; i < m->row_eff; i++) {
+		for (int j = 0; j < m->col_eff; j++) {
+			m->mat[i][j] = 0;
+		}
+	}
+}
+
+
+/* 
+ * Function input_matrix
+ *
+ * Returns a matrix with values from stdin input
+ * */
+Matrix input_matrix(int nrow, int ncol) {
+	Matrix input;
+	init_matrix(&input, nrow, ncol);
+
+	for (int i = 0; i < nrow; i++) {
+		for (int j = 0; j < ncol; j++) {
+			scanf("%d", &input.mat[i][j]);
+		}
+	}
+
+	return input;
+}
+
+
+/* 
+ * Procedure print_matrix
+ * 
+ * Print matrix data
+ * */
+void print_matrix(Matrix *m) {
+	for (int i = 0; i < m->row_eff; i++) {
+		for (int j = 0; j < m->col_eff; j++) {
+			printf("%d ", m->mat[i][j]);
+		}
+		printf("\n");
+	}
+}
+
+
+/* 
+ * Function get_matrix_datarange
+ *
+ * Returns the range between maximum and minimum
+ * element of a matrix
+ * */
+int get_matrix_datarange(Matrix *m) {
+	int max = DATAMIN;
+	int min = DATAMAX;
+	for (int i = 0; i < m->row_eff; i++) {
+		for (int j = 0; j < m->col_eff; j++) {
+			int el = m->mat[i][j];
+			if (el > max) max = el;
+			if (el < min) min = el;
+		}
+	}
+
+	return max - min;
+}
+
+
+/*
+ * Function supression_op
+ *
+ * Returns the sum of intermediate value of special multiplication
+ * operation where kernel[0][0] corresponds to target[row][col]
+ * */
+int supression_op(Matrix *kernel, Matrix *target, int row, int col) {
+	int intermediate_sum = 0;
+	for (int i = 0; i < kernel->row_eff; i++) {
+		for (int j = 0; j < kernel->col_eff; j++) {
+			intermediate_sum += kernel->mat[i][j] * target->mat[row + i][col + j];
+		}
+	}
+
+	return intermediate_sum;
+}
+
+
+/* 
+ * Function convolution
+ *
+ * Return the output matrix of convolution operation
+ * between kernel and target
+ * */
+Matrix convolution(Matrix *kernel, Matrix *target) {
+	Matrix out;
+	int out_row_eff = target->row_eff - kernel->row_eff + 1;
+	int out_col_eff = target->col_eff - kernel->col_eff + 1;
+
+    // printf("kernel row %d",kernel->row_eff);
+    // printf("kernel col %d",kernel->col_eff);
+
+    // printf("out_row_eff %d %d",out_row_eff,out_col_eff);
+	
+	init_matrix(&out, out_row_eff, out_col_eff);
+
+	for (int i = 0; i < out.row_eff; i++) {
+		for (int j = 0; j < out.col_eff; j++) {
+			out.mat[i][j] = supression_op(kernel, target, i, j);
+		}
+	}
+
+	return out;
+}
+
+
+/*
+ * Procedure merge_array
+ *
+ * Merges two subarrays of n with n[left..mid] and n[mid+1..right]
+ * to n itself, with n now ordered ascendingly
+ * */
+void merge_array(int *n, int left, int mid, int right) {
+	int n_left = mid - left + 1;
+	int n_right = right - mid;
+	int iter_left = 0, iter_right = 0, iter_merged = left;
+	int arr_left[n_left], arr_right[n_right];
+
+	for (int i = 0; i < n_left; i++) {
+		arr_left[i] = n[i + left];
+	}
+
+	for (int i = 0; i < n_right; i++) {
+		arr_right[i] = n[i + mid + 1];
+	}
+
+	while (iter_left < n_left && iter_right < n_right) {
+		if (arr_left[iter_left] <= arr_right[iter_right]) {
+			n[iter_merged] = arr_left[iter_left++];
+		} else {
+			n[iter_merged] = arr_right[iter_right++];
+		}
+		iter_merged++;
+	}
+
+	while (iter_left < n_left)  {
+		n[iter_merged++] = arr_left[iter_left++];
+	}
+	while (iter_right < n_right) {
+		n[iter_merged++] = arr_right[iter_right++];
+	} 
+}
+
+
+/* 
+ * Procedure merge_sort
+ *
+ * Sorts array n with merge sort algorithm
+ * */
+void merge_sort(int *n, int left, int right) {
+	if (left < right) {
+		int mid = left + (right - left) / 2;
+
+		merge_sort(n, left, mid);
+		merge_sort(n, mid + 1, right);
+
+		merge_array(n, left, mid, right);
+	}	
+}
+ 
+
+/* 
+ * Procedure print_array
+ *
+ * Prints all elements of array n of size to stdout
+ * */
+void print_array(int *n, int size) {
+	for (int i = 0; i < size; i++ ) printf("%d ", n[i]);
+	printf("\n");
+}
+
+
+/* 
+ * Function get_median
+ *
+ * Returns median of array n of length
+ * */
+int get_median(int *n, int length) {
+	int mid = length / 2;
+	if (length & 1) return n[mid];
+
+	return (n[mid - 1] + n[mid]) / 2;
+}
+
+
+/* 
+ * Function get_floored_mean
+ *
+ * Returns floored mean from an array of integers
+ * */
+long get_floored_mean(int *n, int length) {
+	long sum = 0;
+	for (int i = 0; i < length; i++) {
+		sum += n[i];
+	}
+
+	return sum / length;
+}
+
+
+void distribute(void* data, int count, MPI_Datatype datatype, int root,
+              MPI_Comm communicator) {
+	int world_rank;
+	MPI_Comm_rank(communicator, &world_rank);
+	int world_size;
+	MPI_Comm_size(communicator, &world_size);
+
+	// Procecss 0 --> Kirim matriks inputan 1,2,3 ke prcess 1,2,3
+	// process 1,2,3 -> receive
+	// prcess 0 --> kirim matriks input 4,5,6, ke process 1,2,3
+	
+	if (world_rank == root) {
+	// If we are the root process, send our data to every process
+	int i;
+	for (i = 0; i < world_size; i++) {
+		if (i != world_rank) {
+		MPI_Send(data, count, datatype, i, 0, communicator);
+		}
+	}
+	} else {
+	// If we are a receiver process, receive the data from the root
+	MPI_Recv(data, count, datatype, root, 0, communicator,
+				MPI_STATUS_IGNORE);
+    
+
+	// Ngitung con sama data range		 
+	}
+}
+
+// void init_matrix_kernel() {
+// 	m->row_eff = 2;
+// 	m->col_eff = 2;
+
+// 	m->mat[0][0] = 1;
+//     m->mat[0][1] = 0;
+//     m->mat[1][0] = 0;
+//     m->mat[1][1] = -1;
+// }
+
+// main() driver
+int main() {
+	// OPEN MPI
+	MPI_Init(NULL, NULL);
+
+	int world_rank;
+    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
+    int world_size;
+    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
+
+	const int PING_PONG_LIMIT = 3;
+    printf("HELLO WORLD\n");
+	
+    // 10 / 4 = 2 2 2 (10-6)
+    int kernel_row, kernel_col, target_row, target_col, num_targets;
+	int counter_distribute = 0;
+	int counter_receiver = 0;
+	
+	// reads kernel's row and column and initalize kernel matrix from input
+	scanf("%d %d", &kernel_row, &kernel_col);
+    // Matrix* kernel = (Matrix*)malloc(num_targets * sizeof(Matrix));
+	Matrix kernel = input_matrix(kernel_row, kernel_col);
+	
+	// reads number of target matrices and their dimensions.
+	// initialize array of matrices and array of data ranges (int)
+	scanf("%d %d %d", &num_targets, &target_row, &target_col);
+	Matrix* arr_mat = (Matrix*)malloc(num_targets * sizeof(Matrix));
+	Matrix* arr_mat2 = (Matrix*)malloc(num_targets * sizeof(Matrix));
+	int arr_range[num_targets];
+    
+	
+	// read each target matrix, compute their convolution matrices, and compute their data ranges
+	for (int i = 0; i < num_targets; i++) {
+		arr_mat[i] = input_matrix(target_row, target_col);	 
+	}
+
+    print_matrix(&kernel);
+
+	
+
+    printf("HELLO OPEN MPI\n");
+
+	
+
+    // --------------------------- Create the datatype ----------------------------- //
+    MPI_Datatype mat_MPI;
+    int lengths[3] = { NMAX * NMAX, 1, 1 };
+ 
+    // Calculate displacements
+    // In C, by default padding can be inserted between fields. MPI_Get_address will allow
+    // to get the address of each struct field and calculate the corresponding displacement
+    // relative to that struct base address. The displacements thus calculated will therefore
+    // include padding if any.
+    MPI_Aint displacements[3];
+    Matrix dummy_matrix;
+    MPI_Aint base_address;
+    MPI_Get_address(&dummy_matrix, &base_address);
+    MPI_Get_address(&dummy_matrix.mat, &displacements[0]);
+    MPI_Get_address(&dummy_matrix.row_eff, &displacements[1]);
+    MPI_Get_address(&dummy_matrix.col_eff, &displacements[2]);
+    displacements[0] = MPI_Aint_diff(displacements[0], base_address);
+    displacements[1] = MPI_Aint_diff(displacements[1], base_address);
+    displacements[2] = MPI_Aint_diff(displacements[2], base_address);
+ 
+    MPI_Datatype types[3] = { MPI_INT, MPI_INT, MPI_INT };
+    MPI_Type_create_struct(3, lengths, displacements, types, &mat_MPI);
+    MPI_Type_commit(&mat_MPI);
+     
+    // --------------------------- End Create the datatype ----------------------------- //
+
+	// Distribusi matriks input ke process-process
+	
+    /*
+    0 1 2 3 4 5 6 7 8 9
+
+    0 1 2
+
+    3 4 5
+
+    6 7 8
+
+    9 10 11
+
+    */ 
+
+    int root = 0;
+	int divide = num_targets/(world_size-1);
+ 	int divideInput[world_size-1];
+
+	//  [[1,2,3], [4,5,6], [7,8,9],[10]]
+
+	if (world_rank == root){
+		// Divide input
+		int i;
+		int sisa = num_targets % (world_size-1);
+		for (i = 1; i < world_size; i++) {
+			int jumlah_diambil = divide;
+			if (sisa != 0){
+				jumlah_diambil += 1;
+				sisa -= 1;
+			}
+
+
+			if (i != world_rank) {
+				MPI_Send(&kernel, 1, mat_MPI, i, 0, MPI_COMM_WORLD);
+				for (int j=0; j<divide; j++){
+					MPI_Send(&arr_mat[j], 1, mat_MPI, i, 1, MPI_COMM_WORLD);
+				}
+			}
+		}
+	} else {
+		Matrix recv_data;
+		MPI_Recv(&recv_data, 1, mat_MPI, root, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+		Matrix kernel_recv;
+		MPI_Recv(&kernel_recv, 1, mat_MPI, root, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+
+
+		
+	}
+
+
+	
+    
+
+
+    while (counter_distribute < PING_PONG_LIMIT){
+        // distribute(arr_mat[counter_distribute],1,mat_MPI,0, MPI_COMM_WORLD)
+		printf("Counter dis %d \n", counter_distribute);
+		if (world_rank == root) {
+			// If we are the root process, send our data to every process
+            int i;
+            for (i = 0; i < world_size; i++) {
+                if (i != world_rank) {
+                    MPI_Send(&arr_mat[counter_distribute], 1, mat_MPI, i, 1, MPI_COMM_WORLD);
+                    MPI_Send(&kernel, 1, mat_MPI, i, 0, MPI_COMM_WORLD);
+                    
+                    counter_distribute++;
+
+                    MPI_Send(&counter_distribute, 1, MPI_INT, i, 2, MPI_COMM_WORLD);
+                }
+            }
+			
+		} else {
+			// If we are a receiver process, receive the data from the root
+            Matrix recv_data;
+            MPI_Recv(&recv_data, 1, mat_MPI, root, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+            Matrix kernel_recv;
+            MPI_Recv(&kernel_recv, 1, mat_MPI, root, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+            print_matrix(&recv_data);
+            print_matrix(&kernel_recv);
+            MPI_Recv(&counter_distribute, 1, MPI_INT, root, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+			printf("HASILLL\n");
+			arr_mat2[counter_receiver] = convolution(&kernel_recv, &recv_data);
+            
+            print_matrix(&arr_mat2[counter_receiver]);
+			arr_range[counter_receiver] = get_matrix_datarange(&arr_mat2[counter_receiver]);
+            counter_distribute = counter_distribute;
+			printf("Counter reciver now %d \n", counter_distribute);
+            printf("Counter disribute now %d \n", counter_distribute);
+            
+			counter_receiver++;
+		}
+    }
+
+    printf("HOOOOOOOOOOOOOOO");
+	
+
+    MPI_Finalize();
+
+	print_array(arr_range,num_targets);
+    // Print hasil
+	for (int i = 0; i < num_targets; i++) {
+		printf("\nMATRIX CONV %d",i);
+		print_matrix(&arr_mat2[i]); 
+	}
+
+	// sort the data range array
+	printf("\n");
+	print_array(arr_range,num_targets);
+	merge_sort(arr_range, 0, num_targets - 1);
+	
+	int median = get_median(arr_range, num_targets);	
+	int floored_mean = get_floored_mean(arr_range, num_targets); 
+
+	// print the min, max, median, and floored mean of data range array
+	printf("%d\n%d\n%d\n%d\n", 
+			arr_range[0], 
+			arr_range[num_targets - 1], 
+			median, 
+			floored_mean);
+
+    
+	
+    // START OPEN MP	
+	
+	
+    
+	
+	return 0;
+}