diff --git a/src/openmp b/src/openmp
new file mode 100644
index 0000000000000000000000000000000000000000..d5f231b0629376347e6225087d529027249e31cc
Binary files /dev/null and b/src/openmp differ
diff --git a/src/openmp.c b/src/openmp.c
new file mode 100644
index 0000000000000000000000000000000000000000..629c561cdc4dbd7f3277ab2b2380d5ca705b9f6c
--- /dev/null
+++ b/src/openmp.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+#include <omp.h>
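+// Build note (assumption, not stated in this patch): compile with OpenMP enabled,
+// e.g. "gcc -fopenmp openmp.c -o openmp"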
+
+int main(int argc, char** argv){
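+    // Each thread in the OpenMP team executes this block once and prints its own thread id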
+    #pragma omp parallel
+    {
+        printf("Hello from thread: %d\n", omp_get_thread_num());
+    }
+    return 0;
+}
\ No newline at end of file
diff --git a/src/par b/src/par
index 23fe37ce47d562518349e7e07a6fd6c375d7f3ac..8eec9384443dde2f8e203fa8a24ecd396fca2479 100644
Binary files a/src/par and b/src/par differ
diff --git a/src/parallel.c b/src/parallel.c
index d8f4649fb43c9f80599ff6cba6c0d484df449bdc..b0ccca597b07c9260ccd88ac9f4f1dad4186ec19 100644
--- a/src/parallel.c
+++ b/src/parallel.c
@@ -3,6 +3,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <mpi.h>
+#include <omp.h>
 
 #define NMAX 100
 #define DATAMAX 1000
@@ -81,17 +82,41 @@ void print_matrix(Matrix *m) {
  * element of a matrix
  * */
 int get_matrix_datarange(Matrix *m) {
-	int max = DATAMIN;
-	int min = DATAMAX;
-	for (int i = 0; i < m->row_eff; i++) {
-		for (int j = 0; j < m->col_eff; j++) {
-			int el = m->mat[i][j];
-			if (el > max) max = el;
-			if (el < min) min = el;
-		}
-	}
+	int maxLocal;
+	int minLocal;
 
-	return max - min;
+	int maxGlobal = DATAMIN;
+	int minGlobal = DATAMAX;
+
+	#pragma omp parallel private(maxLocal, minLocal) shared(maxGlobal, minGlobal)
+	{
+		// Each thread starts from the sentinel values and tracks its own extrema
+		maxLocal = DATAMIN;
+		minLocal = DATAMAX;
+
+		#pragma omp for collapse(2) nowait
+		for (int i = 0; i < m->row_eff; i++) {
+			for (int j = 0; j < m->col_eff; j++) {
+				int el = m->mat[i][j];
+				if (el > maxLocal) maxLocal = el;
+				if (el < minLocal) minLocal = el;
+			}
+		}
+
+		// Merge the thread-local results into the shared extrema one thread at a time
+		#pragma omp critical
+		{
+			if (minLocal < minGlobal) {
+				minGlobal = minLocal;
+			}
+			if (maxLocal > maxGlobal) {
+				maxGlobal = maxLocal;
+			}
+		}
+	}
+
+	printf("MAXGLOBAL : %d, MINGLOBAL : %d\n", maxGlobal, minGlobal);
+	return maxGlobal - minGlobal;
 }
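+// Note: on OpenMP 3.1 or newer, a more concise alternative (a sketch, not the
+// approach taken above) would be reduction(max:maxGlobal) reduction(min:minGlobal)
+// on the worksharing loop instead of the explicit critical-section merge.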
 
 
@@ -102,16 +127,37 @@ int get_matrix_datarange(Matrix *m) {
  * operation where kernel[0][0] corresponds to target[row][col]
  * */
 int supression_op(Matrix *kernel, Matrix *target, int row, int col) {
-	int intermediate_sum = 0;
-	for (int i = 0; i < kernel->row_eff; i++) {
-		for (int j = 0; j < kernel->col_eff; j++) {
-			intermediate_sum += kernel->mat[i][j] * target->mat[row + i][col + j];
-		}
+	int partial_Sum;
+	int total_Sum = 0;
+
+	#pragma omp parallel private(partial_Sum) shared(total_Sum)
+	{
+		partial_Sum = 0;
+
+		// Each thread accumulates its share of the kernel-target products
+		#pragma omp for collapse(2)
+		for (int i = 0; i < kernel->row_eff; i++) {
+			for (int j = 0; j < kernel->col_eff; j++) {
+				partial_Sum += kernel->mat[i][j] * target->mat[row + i][col + j];
+			}
+		}
+
+		// Add each thread's partial sum to the total sum
+		#pragma omp critical
+		total_Sum += partial_Sum;
 	}
-
-	return intermediate_sum;
+	return total_Sum;
 }
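+// Note: an equivalent alternative (a sketch, not what is used above) is to let
+// OpenMP combine the per-thread sums itself with a reduction clause, e.g.
+//   #pragma omp parallel for collapse(2) reduction(+:total_Sum)
+// which removes the need for the explicit critical section.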
 
 
 /* 
  * Function convolution
@@ -123,11 +169,6 @@ Matrix convolution(Matrix *kernel, Matrix *target) {
 	Matrix out;
 	int out_row_eff = target->row_eff - kernel->row_eff + 1;
 	int out_col_eff = target->col_eff - kernel->col_eff + 1;
-
-    // printf("kernel row %d",kernel->row_eff);
-    // printf("kernel col %d",kernel->col_eff);
-
-    // printf("out_row_eff %d %d",out_row_eff,out_col_eff);
 	
 	init_matrix(&out, out_row_eff, out_col_eff);
 
@@ -235,46 +276,6 @@ long get_floored_mean(int *n, int length) {
 }
 
 
-void distribute(void* data, int count, MPI_Datatype datatype, int root,
-              MPI_Comm communicator) {
-	int world_rank;
-	MPI_Comm_rank(communicator, &world_rank);
-	int world_size;
-	MPI_Comm_size(communicator, &world_size);
-
-	// Procecss 0 --> Kirim matriks inputan 1,2,3 ke prcess 1,2,3
-	// process 1,2,3 -> receive
-	// prcess 0 --> kirim matriks input 4,5,6, ke process 1,2,3
-	
-	if (world_rank == root) {
-	// If we are the root process, send our data to every process
-	int i;
-	for (i = 0; i < world_size; i++) {
-		if (i != world_rank) {
-		MPI_Send(data, count, datatype, i, 0, communicator);
-		}
-	}
-	} else {
-	// If we are a receiver process, receive the data from the root
-	MPI_Recv(data, count, datatype, root, 0, communicator,
-				MPI_STATUS_IGNORE);
-    
-
-	// Ngitung con sama data range		 
-	}
-}
-
-// void init_matrix_kernel() {
-// 	m->row_eff = 2;
-// 	m->col_eff = 2;
-
-// 	m->mat[0][0] = 1;
-//     m->mat[0][1] = 0;
-//     m->mat[1][0] = 0;
-//     m->mat[1][1] = -1;
-// }
-
-// main() driver
 int main() {
 	// OPEN MPI
 	MPI_Init(NULL, NULL);
@@ -283,11 +284,7 @@ int main() {
     MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
     int world_size;
     MPI_Comm_size(MPI_COMM_WORLD, &world_size);
-
-	const int PING_PONG_LIMIT = 3;
-    printf("HELLO WORLD\n");
 	
-    // 10 / 4 = 2 2 2 (10-6)
     int kernel_row, kernel_col, target_row, target_col, num_targets;
 	int counter_distribute = 0;
 	int counter_receiver = 0;
@@ -310,14 +307,6 @@ int main() {
 		arr_mat[i] = input_matrix(target_row, target_col);	 
 	}
 
-    print_matrix(&kernel);
-
-	
-
-    printf("HELLO OPEN MPI\n");
-
-	
-
     // --------------------------- Create the datatype ----------------------------- //
     MPI_Datatype mat_MPI;
     int lengths[3] = { NMAX * NMAX, 1, 1 };
@@ -373,7 +362,6 @@ int main() {
 					MPI_Send(&arr_mat[j], 1, mat_MPI, rank, 2+k, MPI_COMM_WORLD);
 					j++;
 				}
-				
 				printf("Checkpoint A\n");
 			}
 		}
@@ -407,6 +395,9 @@ int main() {
             }
 		}
 
+
+		// Show the final result
+
 		// sort the data range array
 		printf("BAWAHNYA PRINT ARRAY\n");
 		print_array(arr_range,num_targets);
@@ -458,95 +449,6 @@ int main() {
 		MPI_Send(&arr_range, totalData, MPI_INT, 0, 0, MPI_COMM_WORLD);
 	}
 
-
-    // if (world_rank == 0){
-	// 	printf("BACK TO PROCESS 0");
-	// 	// Now receive the message with the allocated buffer
-    //     for (int i = 0; i < world_size; i++ ){
-	// 		if (i != root){
-    //             MPI_Status status;
-    //             // Probe for an incoming message from process zero
-    //             MPI_Probe(i, 0, MPI_COMM_WORLD, &status);
-
-    //             // When probe returns, the status object has the size and other
-    //             // attributes of the incoming message. Get the message size
-    //             int number_amount;
-    //             MPI_Get_count(&status, MPI_INT, &number_amount);
-
-    //             // Allocate a buffer to hold the incoming numbers
-    //             int* number_buf = (int*)malloc(sizeof(int) * number_amount);
-
-
-    //             MPI_Recv(number_buf, number_amount, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-    //             printf("PROCESS 0 IN RECEIVING");
-    //             print_array(number_buf,number_amount);
-    //         }
-	// 	}
-    // }
-
-
-    // while (counter_distribute < PING_PONG_LIMIT){
-    //     // distribute(arr_mat[counter_distribute],1,mat_MPI,0, MPI_COMM_WORLD)
-	// 	printf("Counter dis %d \n", counter_distribute);
-	// 	if (world_rank == root) {
-	// 		// If we are the root process, send our data to every process
-    //         int i;
-    //         for (i = 0; i < world_size; i++) {
-    //             if (i != world_rank) {
-    //                 MPI_Send(&arr_mat[counter_distribute], 1, mat_MPI, i, 1, MPI_COMM_WORLD);
-    //                 MPI_Send(&kernel, 1, mat_MPI, i, 0, MPI_COMM_WORLD);
-                    
-    //                 counter_distribute++;
-
-    //                 MPI_Send(&counter_distribute, 1, MPI_INT, i, 2, MPI_COMM_WORLD);
-    //             }
-    //         }
-			
-	// 	} else {
-	// 		// If we are a receiver process, receive the data from the root
-    //         Matrix recv_data;
-    //         MPI_Recv(&recv_data, 1, mat_MPI, root, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-    //         Matrix kernel_recv;
-    //         MPI_Recv(&kernel_recv, 1, mat_MPI, root, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-    //         print_matrix(&recv_data);
-    //         print_matrix(&kernel_recv);
-    //         MPI_Recv(&counter_distribute, 1, mat_MPI, root, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-	// 		printf("HASILLL\n");
-	// 		arr_mat2[counter_receiver] = convolution(&kernel_recv, &recv_data);
-            
-    //         print_matrix(&arr_mat2[counter_receiver]);
-	// 		arr_range[counter_receiver] = get_matrix_datarange(&arr_mat2[counter_receiver]);
-    //         counter_distribute = counter_distribute;
-	// 		printf("Counter reciver now %d \n", counter_distribute);
-    //         printf("Counter disribute now %d \n", counter_distribute);
-            
-	// 		counter_receiver++;
-	// 	}
-    // }
-
-	// // sort the data range array
-	// printf("BAWAHNYA PRINT ARRAY\n");
-	// print_array(arr_range,num_targets);
-	// merge_sort(arr_range, 0, num_targets - 1);
-
-
-    // printf("AFTER SORT\n");
-	// print_array(arr_range,num_targets);
-	
-	// int median = get_median(arr_range, num_targets);	
-	// int floored_mean = get_floored_mean(arr_range, num_targets); 
-	// // int floored_mean = 10;
-
-	// // print the min, max, median, and floored mean of data range array
-	// printf("HASIL FINALLLL\n");
-	// printf("MIN : %d\nMAX : %d\nMedian : %d\nRata-Rata : %d\n", 
-	// 		arr_range[0], 
-	// 		arr_range[num_targets - 1], 
-	// 		median, 
-	// 		floored_mean);
-
-    // // START OPEN MP
-
 	MPI_Finalize();
 	return 0;
 }