From 8082e9acf245a85b144371dfb059348b152e6f22 Mon Sep 17 00:00:00 2001
From: azharfatrr <13519020@std.stei.itb.ac.id>
Date: Thu, 17 Mar 2022 23:06:35 +0700
Subject: [PATCH] fix: get datarange

---
 makefile                              |   2 +-
 result/K01-03-TC1_parallel.txt        |   2 +-
 result/K01-03-TC1_parallel_collab.txt |   2 +-
 result/K01-03-TC1_serial.txt          |   3 +-
 result/K01-03-TC2_parallel.txt        |   2 +-
 result/K01-03-TC2_parallel_collab.txt |   2 +-
 result/K01-03-TC2_serial.txt          |   2 +-
 result/K01-03-TC3_parallel.txt        |   2 +-
 result/K01-03-TC3_parallel_collab.txt |   2 +-
 result/K01-03-TC3_serial.txt          |   2 +-
 result/K01-03-TC4_parallel.txt        |   2 +-
 result/K01-03-TC4_parallel_collab.txt |   2 +-
 result/K01-03-TC4_serial.txt          |   3 +-
 src/parallel_collab.cu                |  61 ++-
 src/serial.c                          | 266 +------------
 src/serial_real.c                     | 277 -------------
 src/serial_test.c                     | 539 ++++++++++++++++++++++++++
 17 files changed, 608 insertions(+), 563 deletions(-)
 delete mode 100644 src/serial_real.c
 create mode 100644 src/serial_test.c

diff --git a/makefile b/makefile
index 6f361dd..ccac332 100644
--- a/makefile
+++ b/makefile
@@ -1,4 +1,4 @@
-SOURCE_SERIAL=./src/serial_real.c
+SOURCE_SERIAL=./src/serial.c
 SOURCE_PARALLEL=./src/parallel.cu
 SOURCE_TEST=./src/test.c
 SOURCE_PARALLEL_COLLAB=./src/parallel_collab.cu
diff --git a/result/K01-03-TC1_parallel.txt b/result/K01-03-TC1_parallel.txt
index 3c886f1..04a2a8a 100644
--- a/result/K01-03-TC1_parallel.txt
+++ b/result/K01-03-TC1_parallel.txt
@@ -3,4 +3,4 @@
 10114197
 10323010
 
-Runtime: 0.162468 s
+Runtime: 0.155827 s
diff --git a/result/K01-03-TC1_parallel_collab.txt b/result/K01-03-TC1_parallel_collab.txt
index 2dca374..f91c914 100644
--- a/result/K01-03-TC1_parallel_collab.txt
+++ b/result/K01-03-TC1_parallel_collab.txt
@@ -3,4 +3,4 @@
 10114197
 10323010
 
-Runtime: 0.163408 s
+Runtime: 0.144829 s
diff --git a/result/K01-03-TC1_serial.txt b/result/K01-03-TC1_serial.txt
index d747dfe..989f60d 100644
--- a/result/K01-03-TC1_serial.txt
+++ b/result/K01-03-TC1_serial.txt
@@ -1,7 +1,6 @@
-yey
 8539213
 11916317
 10114197
 10323010
 
-Runtime: 0.015577 s
+Runtime: 0.014589 s
diff --git a/result/K01-03-TC2_parallel.txt b/result/K01-03-TC2_parallel.txt
index af4e4b0..b594a37 100644
--- a/result/K01-03-TC2_parallel.txt
+++ b/result/K01-03-TC2_parallel.txt
@@ -3,4 +3,4 @@
 37739803
 38222937
 
-Runtime: 0.857004 s
+Runtime: 0.833981 s
diff --git a/result/K01-03-TC2_parallel_collab.txt b/result/K01-03-TC2_parallel_collab.txt
index 760c0b3..aafe92c 100644
--- a/result/K01-03-TC2_parallel_collab.txt
+++ b/result/K01-03-TC2_parallel_collab.txt
@@ -3,4 +3,4 @@
 37739803
 38222937
 
-Runtime: 0.446493 s
+Runtime: 0.422364 s
diff --git a/result/K01-03-TC2_serial.txt b/result/K01-03-TC2_serial.txt
index b07814f..d21e02c 100644
--- a/result/K01-03-TC2_serial.txt
+++ b/result/K01-03-TC2_serial.txt
@@ -3,4 +3,4 @@
 37739803
 38222937
 
-Runtime: 0.715588 s
+Runtime: 0.689801 s
diff --git a/result/K01-03-TC3_parallel.txt b/result/K01-03-TC3_parallel.txt
index 67a2084..bff74c6 100644
--- a/result/K01-03-TC3_parallel.txt
+++ b/result/K01-03-TC3_parallel.txt
@@ -3,4 +3,4 @@
 23198319
 23380111
 
-Runtime: 0.984349 s
+Runtime: 0.988793 s
diff --git a/result/K01-03-TC3_parallel_collab.txt b/result/K01-03-TC3_parallel_collab.txt
index ee9fbc6..98ed976 100644
--- a/result/K01-03-TC3_parallel_collab.txt
+++ b/result/K01-03-TC3_parallel_collab.txt
@@ -3,4 +3,4 @@
 23198319
 23380111
 
-Runtime: 0.722501 s
+Runtime: 0.644663 s
diff --git a/result/K01-03-TC3_serial.txt b/result/K01-03-TC3_serial.txt
index 7232575..7cb8cc0 100644
--- a/result/K01-03-TC3_serial.txt
+++ b/result/K01-03-TC3_serial.txt
@@ -3,4 +3,4 @@
 23198319
 23380111
 
-Runtime: 0.817598 s
+Runtime: 0.815450 s
diff --git a/result/K01-03-TC4_parallel.txt b/result/K01-03-TC4_parallel.txt
index 3fa6f6a..b1f42bf 100644
--- a/result/K01-03-TC4_parallel.txt
+++ b/result/K01-03-TC4_parallel.txt
@@ -3,4 +3,4 @@
 51451884
 51774352
 
-Runtime: 9.561980 s
+Runtime: 9.862558 s
diff --git a/result/K01-03-TC4_parallel_collab.txt b/result/K01-03-TC4_parallel_collab.txt
index b6c4758..af651ca 100644
--- a/result/K01-03-TC4_parallel_collab.txt
+++ b/result/K01-03-TC4_parallel_collab.txt
@@ -3,4 +3,4 @@
 51451884
 51774352
 
-Runtime: 4.290069 s
+Runtime: 4.018500 s
diff --git a/result/K01-03-TC4_serial.txt b/result/K01-03-TC4_serial.txt
index 0f63ac9..789cfe3 100644
--- a/result/K01-03-TC4_serial.txt
+++ b/result/K01-03-TC4_serial.txt
@@ -1,7 +1,6 @@
-yey
 41250811
 71841136
 51451884
 51774352
 
-Runtime: 8.956075 s
+Runtime: 8.795971 s
diff --git a/src/parallel_collab.cu b/src/parallel_collab.cu
index 5d64706..ffb6074 100644
--- a/src/parallel_collab.cu
+++ b/src/parallel_collab.cu
@@ -513,6 +513,39 @@ __global__ void d_cuda_convolution(int* d_out_mat, int* arr_mat_rm, int* kernel_
  	}
 }
 
+/*
+ * Kernel d_get_matrix_datarange
+ * Computes the range between the maximum and minimum element
+ * of every matrix in out_mat and stores it in d_arr_range
+ * */
+__global__ void d_get_matrix_datarange(int* out_mat, int* d_arr_range, int row_eff, int col_eff) {
+	// Determine current matrix from block;
+	int curr_mat = blockIdx.y;
+	// Calculate padding output matrix and arr_range matrix.
+	int pad = curr_mat * row_eff * col_eff;
+	int pad_arr_range = curr_mat;
+
+	// Get i, and j from threadIdx
+	int tid = blockIdx.x * blockDim.x + threadIdx.x;
+	int i, j;
+	d_row_major_to_index(tid, row_eff, col_eff, &i, &j);
+	// Compute the data range of each output matrix
+	if (i < row_eff && j < col_eff) {
+		int max = DATAMIN;
+		int min = DATAMAX;
+		for (int k = 0; k < row_eff; k++) {
+			for (int l = 0; l < col_eff; l++) {
+				int index = d_index_to_row_major(i + k, j + l, row_eff, col_eff) + pad;
+
+				int el = out_mat[index];
+				if (el > max) max = el;
+				if (el < min) min = el;
+			}
+		}
+
+		d_arr_range[d_index_to_row_major(i, j, row_eff, col_eff) + pad_arr_range] = max-min;
+	}
+}
 
 // main() driver
 int main() {
@@ -530,7 +563,7 @@ int main() {
 	// initialize array of matrices and array of data ranges (int)
 	scanf("%d %d %d", &num_targets, &target_row, &target_col);
 	Matrix* arr_mat = (Matrix*)malloc(num_targets * sizeof(Matrix));
-	int arr_range[num_targets];
+	// int arr_range[num_targets];
 	
 	// Calculate variable for cuda computing.
 	int a = (target_row-kernel_row+1) * (target_col-kernel_col+1);
@@ -544,8 +577,8 @@ int main() {
 	// Allocate variable. 
 	// rm means row-major. It's indicate matrix are in row-major order.
 	// Variable declaration.
-	int * arr_mat_rm=0, * d_arr_mat_rm=0, *out_mat_rm=0, *d_out_mat_rm=0, *kernel_rm=0, *d_kernel_rm=0;
-	int size_arr_mat, size_out_mat, size_kernel;
+	int * arr_mat_rm=0, * d_arr_mat_rm=0, *out_mat_rm=0, *d_out_mat_rm=0, *kernel_rm=0, *d_kernel_rm=0, *arr_range=0, *d_arr_range=0;
+	int size_arr_mat, size_out_mat, size_kernel, size_arr_range;
 	cudaError err;
 
 	// Allocate input matrix in device and host.
@@ -610,11 +643,25 @@ int main() {
 	// for (int i = 0; i < num_targets; i++){
 		// cuda_convolution(out_mat_rm, arr_mat_rm, kernel_rm, row_eff, col_eff, kernel_row, kernel_col, i);
 	// }
-	arr_mat = rm_to_list_matrix_object(out_mat_rm, num_targets, row_eff*col_eff, row_eff, col_eff);
+	// arr_mat = rm_to_list_matrix_object(out_mat_rm, num_targets, row_eff*col_eff, row_eff, col_eff);
+
+	// Allocate arr_range matrix in device and host.
+	size_arr_range = num_targets;
+	arr_range = (int*)malloc(sizeof(int) * size_arr_range);
+	cudaMalloc((void **)&d_arr_range, sizeof(int) * size_arr_range);
+	if (arr_range == 0 || d_arr_range == 0) {
+		printf("Error: Memory allocation failed for arr_range.\n");
+		return 1;
+	}
+	cudaMemset(d_arr_range, 0, sizeof(int) * size_arr_range);
 
-	// For each target matrix, and compute their data ranges
-	for (int i = 0; i < num_targets; i++) {
-		arr_range[i] = get_matrix_datarange(&arr_mat[i]); 
+	grid.x = 1;
+	grid.y = num_targets;
+	block.x = 1;
+	d_get_matrix_datarange<<<grid, block>>>(d_out_mat_rm, d_arr_range, row_eff, col_eff);
+	err = cudaMemcpy(arr_range, d_arr_range, sizeof(int) * size_arr_range, cudaMemcpyDeviceToHost);
+	if (err != cudaSuccess) {
+		printf("Error copy device to host 2: %s\n", cudaGetErrorString(err));
 	}
 
 	// sort the data range array
diff --git a/src/serial.c b/src/serial.c
index ed2febe..ab02e0f 100644
--- a/src/serial.c
+++ b/src/serial.c
@@ -3,7 +3,6 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <time.h>
-#include <math.h>
 
 #define NMAX 100
 #define DATAMAX 1000
@@ -230,202 +229,6 @@ long get_floored_mean(int *n, int length) {
 	return sum / length;
 }
 
-/**
- * Function index_to_row_major 
- * 
- * Returns the index of a matrix element in row-major order 
- */
-int index_to_row_major(int row, int col, int row_eff, int col_eff) {
-	return row * col_eff + col;
-}
-
-// __device__ int d_index_to_row_major(int row, int col, int row_eff, int col_eff) {
-// 		return row * col_eff + col;
-// }
-
-/**
- * Function row_major_to_index
- * 
- * Returns the row and column of a matrix element in row-major order
- */
-void row_major_to_index(int index, int row_eff, int col_eff, int *row, int *col) {
-	*row = index / col_eff;
-	*col = index % col_eff;
-}
-
-// __device__ void d_row_major_to_index(int index, int row_eff, int col_eff, int *row, int *col) {
-// 		*row = index / col_eff;
-// 		*col = index % col_eff;
-// }
-
-/**
- * Function map_matrix
- * 
- * Returns a row major matrix of the input matrix.
- **/
-int* map_matrix(int mat[][100], int row, int col) {
-	int* map = (int*) malloc(sizeof(int) * row * col);
-	for (int i = 0; i < row; i++) {
-		for (int j = 0; j < col; j++) {
-			map[index_to_row_major(i, j, row, col)] = mat[i][j];
-		}
-	}
-	return map;
-}
-
-/*
- * Function map_matrix_extended
- * 
- * Returns a row major matrix of the input matrix.
- **/
-int* map_matrix_extended(int** mat, int row, int col) {
-	int* map = (int*) malloc(sizeof(int) * row * col);
-	for (int i = 0; i < row; i++) {
-		for (int j = 0; j < col; j++) {
-			map[index_to_row_major(i, j, row, col)] = mat[i][j];
-		}
-	}
-	return map;
-}
-
-
-/**
- * Function reverse_map_matrix
- * 
- * Returns a matrix of the input row major matrix.
- */
-int** reverse_map_matrix(int* map, int row, int col) {
-	int** mat = (int**) malloc(sizeof(int*) * row);
-	for (int i = 0; i < row; i++) {
-		mat[i] = (int*) malloc(sizeof(int) * col);
-		for (int j = 0; j < col; j++) {
-			mat[i][j] = map[index_to_row_major(i, j, row, col)];
-		}
-	}
-	return mat;
-}
-
-/**
- * Function rm_to_matrix_object
- * 
- * Return Matrix struct of row major matrix
- */
-Matrix rm_to_matrix_object(int* map, int row, int col) {
-	Matrix mat;
-	init_matrix(&mat, row, col);
-	for (int i = 0; i < row; i++) {
-		for (int j = 0; j < col; j++) {
-			mat.mat[i][j] = map[index_to_row_major(i, j, row, col)];
-		}
-	}
-	return mat;
-}
-
-/**
- * Function rm_to_list_matrix_object
- * 
- * Return List of Matrix Struct of row major matrix
- */
-Matrix* rm_to_list_matrix_object(int* map, int row, int col, int row_inner, int col_inner) {
-	Matrix* mat = (Matrix*) malloc(sizeof(Matrix) * row);
-	for (int i = 0; i < row; i++) {
-		init_matrix(&mat[i], row_inner, col_inner);
-		int pad = i * col;
-		for (int j = 0; j < row_inner; j++) {
-			for (int k = 0; k < col_inner; k++) {
-				int index = index_to_row_major(j, k, row_inner, col_inner) + pad;
-				mat[i].mat[j][k] = map[index];
-			}
-		}
-	}
-	return mat;
-}
-/**
- * Function list_matrix_object_to_rm
- * 
- * Return row major matrix of list of Matrix struct
- */
-int* list_matrix_object_to_rm(Matrix* mat, int num_matrix,  int row_inner, int col_inner) {
-	int* map = (int*) malloc(sizeof(int) * num_matrix * row_inner * col_inner);
-	for (int i = 0; i < num_matrix; i++) {
-		int pad = i * row_inner * col_inner;
-		for (int j = 0; j < row_inner; j++) {
-			for (int k = 0; k < col_inner; k++) {
-				int index = index_to_row_major(j, k, row_inner, col_inner) + pad;
-				map[index] = mat[i].mat[j][k];
-			}
-		}
-	}
-	
-	return map;
-}
-
-/**
- * Function cuda_convolution
- * 
- * Returns a matrix of the convolution of the input matrix with the kernel
- */
-void cuda_convolution(int* out_mat_rm, int* arr_mat_rm, int* kernel_rm, int row_eff, int col_eff, int kernel_row, int kernel_col, int curr_mat) {
-	// Calculate real row and column of input matrix.
-	int row = row_eff + kernel_row - 1;
-	int col = col_eff + kernel_col - 1;
-	
-	// Calculate padding target and output matrix.
-	int pad = curr_mat * row * col;
-	int pad_out = curr_mat * row_eff * col_eff;
-
-	// For each element in input matrix that is not on the boundary,
-	for (int i = 0 ; i < row_eff; i++) {
-		for (int j = 0; j < col_eff; j++) {
-			// Convolution of the element with the kernel.
-			// Calculate the sum of the kernel and the input matrix.
-			int intermediate_sum = 0;
-			for (int k = 0; k < kernel_row; k++) {
-				for (int l = 0; l < kernel_col; l++) {
-					int index = index_to_row_major(i + k, j + l, row, col) + pad;
-					int kernel_index = index_to_row_major(k, l, kernel_row, kernel_col);
-					intermediate_sum += arr_mat_rm[index] * kernel_rm[kernel_index];
-					// Print all i,j,k,l
-					// printf("i:%d, j:%d, k:%d, l:%d\n", i, j, k, l);
-
-				}
-			}
-			// Store the sum in the output matrix.
-			out_mat_rm[index_to_row_major(i, j, row_eff, col_eff) + pad_out ] = intermediate_sum;
-		}
-	}
-}
-
-// __global__ void d_cuda_convolution(int* d_out_mat, int* arr_mat_rm, int* kernel_rm, int row_eff, int col_eff, int kernel_row, int kernel_col) {
-//  // Calculate real row and column of input matrix.
-// 	int row = row_eff + kernel_row - 1;
-// 	int col = col_eff + kernel_col - 1;
-// 
-//  // Determine current matrix from block;
-// 	int curr_mat = blockIdx.y;
-//
-//  // Calculate padding target and output matrix.
-// 	int pad = curr_mat * row * col;
-// 	int pad_out = curr_mat * row_eff * col_eff;
-// 
-//  // Get i, and j from threadIdx
-//  int tid = blockIdx.x * blockDim.x + threadIdx.x;
-//  int i, j;
-//  d_row_major_to_index(tid, row_eff, col_eff, &i, &j);
-// 
-//  // Calculate element in input matrix that is not on the boundary,
-// 	if (i < row_eff && j < col_eff) {
-// 		int intermediate_sum = 0;
-// 		for (int k = 0; k < kernel_row; k++) {
-// 			for (int l = 0; l < kernel_col; l++) {
-// 				int index = d_index_to_row_major(i + k, j + l, row, col) + pad;
-// 				int kernel_index = d_index_to_row_major(k, l, kernel_row, kernel_col);
-// 				intermediate_sum += arr_mat_rm[index] * kernel_rm[kernel_index];
-// 			}
-// 		}
-// 		d_out_mat[d_index_to_row_major(i, j, row_eff, col_eff) + pad_out] = intermediate_sum;
-// 	}
-// }
 
 
 // main() driver
@@ -446,75 +249,10 @@ int main() {
 	Matrix* arr_mat = (Matrix*)malloc(num_targets * sizeof(Matrix));
 	int arr_range[num_targets];
 	
-	// Calculate variable for cuda computing.
-	int a = (target_row-kernel_row+1) * (target_col-kernel_col+1);
-	int b = 1024;
-	int block_size = a/b + (a % b != 0); // ceil(a/b)
-	int threads_per_block = 1024;
-	int row_eff = target_row - kernel_row + 1;
-	int col_eff = target_col - kernel_col + 1;
-
-
-	// Allocate variable. 
-	// rm means row-major. It's indicate matrix are in row-major order.
-	// Variable declaration.
-	int * arr_mat_rm, * d_arr_mat_rm, *out_mat_rm, *d_out_mat_rm, *kernel_rm, *d_kernel_rm;
-	int size_arr_mat, size_out_mat, size_kernel;
-	// cudaError err;
-
-	// Allocate input matrix in device and host.
-	size_arr_mat = num_targets * target_row * target_col;
-	arr_mat_rm = (int*)malloc(sizeof(int*) * size_arr_mat);
-	// cudaMalloc(&d_arr_mat_rm, sizeof(int*) * size_arr_mat);
-
-	// Allocate output matrix in device and host.
-	size_out_mat = num_targets * row_eff * col_eff;
-	out_mat_rm = (int*)malloc(sizeof(int*) * size_out_mat);
-	// cudaMalloc(&d_out_mat_rm, sizeof(int*) * size_out_mat);
-
-	// Allocate kernel matrix in host.
-	size_kernel = kernel_row * kernel_col;
-	kernel_rm = (int*)malloc(sizeof(int) * size_kernel);
-	// Store kernel in row major form and allocate kernel for device.
-	kernel_rm = map_matrix(kernel.mat, kernel_row, kernel_col);
-	// cudaMalloc(&d_kernel_rm, sizeof(int) * size_kernel);
-	// err = cudaMemcpy(d_kernel_rm, kernel_rm, sizeof(int) * size_kernel, cudaMemcpyHostToDevice);
-	// if (err != cudaSuccess) {
-	// 	printf("Error copy host to device: %s\n", cudaGetErrorString(err));
-	// }
-	
-	
-	// Read each target matrix.
+	// read each target matrix, compute their convolution matrices, and compute their data ranges
 	for (int i = 0; i < num_targets; i++) {
 		arr_mat[i] = input_matrix(target_row, target_col);
-	}
-	// Store each target matrix in row major form and allocate target matrix for device.
-	arr_mat_rm = list_matrix_object_to_rm(arr_mat, num_targets, target_row, target_col);
-	// cudaMalloc(&d_arr_mat_rm, sizeof(int) * size_arr_mat);
-	// err = cudaMemcpy(d_arr_mat_rm, arr_mat_rm, sizeof(int) * size_arr_mat, cudaMemcpyHostToDevice);
-	// if (err != cudaSuccess) {
-	// 	printf("Error copy host to device: %s\n", cudaGetErrorString(err));
-	// }
-
-	// For each target matrix, compute their convolution matrices.
-	// dim3 grid, block;
-	// block.x = block_size;
-	// block.y = num_targets;
-	// grid.x = threads_per_block;
-	// d_cuda_convolution<<<grid, block>>>(d_out_mat_rm, d_arr_mat_rm, d_kernel_rm, row_eff, col_eff, kernel_row, kernel_col);
-	// cudaMemcpy(out_mat_rm, d_out_mat_rm, sizeof(int) * size_out_mat, cudaMemcpyDeviceToHost);
-	for (int i = 0; i < num_targets; i++){
-		cuda_convolution(out_mat_rm, arr_mat_rm, kernel_rm, row_eff, col_eff, kernel_row, kernel_col, i);
-	}
-	arr_mat = rm_to_list_matrix_object(out_mat_rm, num_targets, row_eff*col_eff, row_eff, col_eff);
-
-	// // Free cuda Memory.
-	// cudaFree(d_arr_mat_rm);
-	// cudaFree(d_out_mat_rm);
-	// cudaFree(d_kernel_rm);
-
-	// For each target matrix, and compute their data ranges
-	for (int i = 0; i < num_targets; i++) {
+		arr_mat[i] = convolution(&kernel, &arr_mat[i]);
 		arr_range[i] = get_matrix_datarange(&arr_mat[i]); 
 	}
 
diff --git a/src/serial_real.c b/src/serial_real.c
deleted file mode 100644
index 7a641e5..0000000
--- a/src/serial_real.c
+++ /dev/null
@@ -1,277 +0,0 @@
-// serial.c
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <time.h>
-
-#define NMAX 100
-#define DATAMAX 1000
-#define DATAMIN -1000
-
-/* 
- * Struct Matrix
- *
- * Matrix representation consists of matrix data 
- * and effective dimensions 
- * */
-typedef struct Matrix {
-	int mat[NMAX][NMAX];	// Matrix cells
-	int row_eff;			// Matrix effective row
-	int col_eff;			// Matrix effective column
-} Matrix;
-
-
-/* 
- * Procedure init_matrix
- * 
- * Initializing newly allocated matrix
- * Setting all data to 0 and effective dimensions according
- * to nrow and ncol 
- * */
-void init_matrix(Matrix *m, int nrow, int ncol) {
-	m->row_eff = nrow;
-	m->col_eff = ncol;
-
-	for (int i = 0; i < m->row_eff; i++) {
-		for (int j = 0; j < m->col_eff; j++) {
-			m->mat[i][j] = 0;
-		}
-	}
-}
-
-
-/* 
- * Function input_matrix
- *
- * Returns a matrix with values from stdin input
- * */
-Matrix input_matrix(int nrow, int ncol) {
-	Matrix input;
-	init_matrix(&input, nrow, ncol);
-
-	for (int i = 0; i < nrow; i++) {
-		for (int j = 0; j < ncol; j++) {
-			scanf("%d", &input.mat[i][j]);
-		}
-	}
-
-	return input;
-}
-
-
-/* 
- * Procedure print_matrix
- * 
- * Print matrix data
- * */
-void print_matrix(Matrix *m) {
-	for (int i = 0; i < m->row_eff; i++) {
-		for (int j = 0; j < m->col_eff; j++) {
-			printf("%d ", m->mat[i][j]);
-		}
-		printf("\n");
-	}
-}
-
-
-/* 
- * Function get_matrix_datarange
- *
- * Returns the range between maximum and minimum
- * element of a matrix
- * */
-int get_matrix_datarange(Matrix *m) {
-	int max = DATAMIN;
-	int min = DATAMAX;
-	for (int i = 0; i < m->row_eff; i++) {
-		for (int j = 0; j < m->col_eff; j++) {
-			int el = m->mat[i][j];
-			if (el > max) max = el;
-			if (el < min) min = el;
-		}
-	}
-
-	return max - min;
-}
-
-
-/*
- * Function supression_op
- *
- * Returns the sum of intermediate value of special multiplication
- * operation where kernel[0][0] corresponds to target[row][col]
- * */
-int supression_op(Matrix *kernel, Matrix *target, int row, int col) {
-	int intermediate_sum = 0;
-	for (int i = 0; i < kernel->row_eff; i++) {
-		for (int j = 0; j < kernel->col_eff; j++) {
-			intermediate_sum += kernel->mat[i][j] * target->mat[row + i][col + j];
-		}
-	}
-
-	return intermediate_sum;
-}
-
-
-/* 
- * Function convolution
- *
- * Return the output matrix of convolution operation
- * between kernel and target
- * */
-Matrix convolution(Matrix *kernel, Matrix *target) {
-	Matrix out;
-	int out_row_eff = target->row_eff - kernel->row_eff + 1;
-	int out_col_eff = target->col_eff - kernel->col_eff + 1;
-	
-	init_matrix(&out, out_row_eff, out_col_eff);
-
-	for (int i = 0; i < out.row_eff; i++) {
-		for (int j = 0; j < out.col_eff; j++) {
-			out.mat[i][j] = supression_op(kernel, target, i, j);
-		}
-	}
-
-	return out;
-}
-
-
-/*
- * Procedure merge_array
- *
- * Merges two subarrays of n with n[left..mid] and n[mid+1..right]
- * to n itself, with n now ordered ascendingly
- * */
-void merge_array(int *n, int left, int mid, int right) {
-	int n_left = mid - left + 1;
-	int n_right = right - mid;
-	int iter_left = 0, iter_right = 0, iter_merged = left;
-	int arr_left[n_left], arr_right[n_right];
-
-	for (int i = 0; i < n_left; i++) {
-		arr_left[i] = n[i + left];
-	}
-
-	for (int i = 0; i < n_right; i++) {
-		arr_right[i] = n[i + mid + 1];
-	}
-
-	while (iter_left < n_left && iter_right < n_right) {
-		if (arr_left[iter_left] <= arr_right[iter_right]) {
-			n[iter_merged] = arr_left[iter_left++];
-		} else {
-			n[iter_merged] = arr_right[iter_right++];
-		}
-		iter_merged++;
-	}
-
-	while (iter_left < n_left)  {
-		n[iter_merged++] = arr_left[iter_left++];
-	}
-	while (iter_right < n_right) {
-		n[iter_merged++] = arr_right[iter_right++];
-	} 
-}
-
-
-/* 
- * Procedure merge_sort
- *
- * Sorts array n with merge sort algorithm
- * */
-void merge_sort(int *n, int left, int right) {
-	if (left < right) {
-		int mid = left + (right - left) / 2;
-
-		merge_sort(n, left, mid);
-		merge_sort(n, mid + 1, right);
-
-		merge_array(n, left, mid, right);
-	}	
-}
- 
-
-/* 
- * Procedure print_array
- *
- * Prints all elements of array n of size to stdout
- * */
-void print_array(int *n, int size) {
-	for (int i = 0; i < size; i++ ) printf("%d ", n[i]);
-	printf("\n");
-}
-
-
-/* 
- * Function get_median
- *
- * Returns median of array n of length
- * */
-int get_median(int *n, int length) {
-	int mid = length / 2;
-	if (length & 1) return n[mid];
-
-	return (n[mid - 1] + n[mid]) / 2;
-}
-
-
-/* 
- * Function get_floored_mean
- *
- * Returns floored mean from an array of integers
- * */
-long get_floored_mean(int *n, int length) {
-	long sum = 0;
-	for (int i = 0; i < length; i++) {
-		sum += n[i];
-	}
-
-	return sum / length;
-}
-
-
-
-// main() driver
-int main() {
-	// Time.
-	clock_t t;
-    t = clock();
-
-	int kernel_row, kernel_col, target_row, target_col, num_targets;
-	
-	// reads kernel's row and column and initalize kernel matrix from input
-	scanf("%d %d", &kernel_row, &kernel_col);
-	Matrix kernel = input_matrix(kernel_row, kernel_col);
-	
-	// reads number of target matrices and their dimensions.
-	// initialize array of matrices and array of data ranges (int)
-	scanf("%d %d %d", &num_targets, &target_row, &target_col);
-	Matrix* arr_mat = (Matrix*)malloc(num_targets * sizeof(Matrix));
-	int arr_range[num_targets];
-	
-	// read each target matrix, compute their convolution matrices, and compute their data ranges
-	for (int i = 0; i < num_targets; i++) {
-		arr_mat[i] = input_matrix(target_row, target_col);
-		arr_mat[i] = convolution(&kernel, &arr_mat[i]);
-		arr_range[i] = get_matrix_datarange(&arr_mat[i]); 
-	}
-
-	// sort the data range array
-	merge_sort(arr_range, 0, num_targets - 1);
-	
-	int median = get_median(arr_range, num_targets);	
-	int floored_mean = get_floored_mean(arr_range, num_targets); 
-
-	// print the min, max, median, and floored mean of data range array
-	printf("%d\n%d\n%d\n%d\n", 
-			arr_range[0], 
-			arr_range[num_targets - 1], 
-			median, 
-			floored_mean);
-
-	// Print execution time in seconds.
-	t = clock() - t;
-	printf("\nRuntime: %f s\n", ((float)t) / CLOCKS_PER_SEC);
-	
-	return 0;
-}
diff --git a/src/serial_test.c b/src/serial_test.c
new file mode 100644
index 0000000..ed2febe
--- /dev/null
+++ b/src/serial_test.c
@@ -0,0 +1,539 @@
+// serial.c
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <math.h>
+
+#define NMAX 100
+#define DATAMAX 1000
+#define DATAMIN -1000
+
+/* 
+ * Struct Matrix
+ *
+ * Matrix representation consists of matrix data 
+ * and effective dimensions 
+ * */
+typedef struct Matrix {
+	int mat[NMAX][NMAX];	// Matrix cells
+	int row_eff;			// Matrix effective row
+	int col_eff;			// Matrix effective column
+} Matrix;
+
+
+/* 
+ * Procedure init_matrix
+ * 
+ * Initializing newly allocated matrix
+ * Setting all data to 0 and effective dimensions according
+ * to nrow and ncol 
+ * */
+void init_matrix(Matrix *m, int nrow, int ncol) {
+	m->row_eff = nrow;
+	m->col_eff = ncol;
+
+	for (int i = 0; i < m->row_eff; i++) {
+		for (int j = 0; j < m->col_eff; j++) {
+			m->mat[i][j] = 0;
+		}
+	}
+}
+
+
+/* 
+ * Function input_matrix
+ *
+ * Returns a matrix with values from stdin input
+ * */
+Matrix input_matrix(int nrow, int ncol) {
+	Matrix input;
+	init_matrix(&input, nrow, ncol);
+
+	for (int i = 0; i < nrow; i++) {
+		for (int j = 0; j < ncol; j++) {
+			scanf("%d", &input.mat[i][j]);
+		}
+	}
+
+	return input;
+}
+
+
+/* 
+ * Procedure print_matrix
+ * 
+ * Print matrix data
+ * */
+void print_matrix(Matrix *m) {
+	for (int i = 0; i < m->row_eff; i++) {
+		for (int j = 0; j < m->col_eff; j++) {
+			printf("%d ", m->mat[i][j]);
+		}
+		printf("\n");
+	}
+}
+
+
+/* 
+ * Function get_matrix_datarange
+ *
+ * Returns the range between maximum and minimum
+ * element of a matrix
+ * */
+int get_matrix_datarange(Matrix *m) {
+	int max = DATAMIN;
+	int min = DATAMAX;
+	for (int i = 0; i < m->row_eff; i++) {
+		for (int j = 0; j < m->col_eff; j++) {
+			int el = m->mat[i][j];
+			if (el > max) max = el;
+			if (el < min) min = el;
+		}
+	}
+
+	return max - min;
+}
+
+
+/*
+ * Function supression_op
+ *
+ * Returns the sum of intermediate value of special multiplication
+ * operation where kernel[0][0] corresponds to target[row][col]
+ * */
+int supression_op(Matrix *kernel, Matrix *target, int row, int col) {
+	int intermediate_sum = 0;
+	for (int i = 0; i < kernel->row_eff; i++) {
+		for (int j = 0; j < kernel->col_eff; j++) {
+			intermediate_sum += kernel->mat[i][j] * target->mat[row + i][col + j];
+		}
+	}
+
+	return intermediate_sum;
+}
+
+
+/* 
+ * Function convolution
+ *
+ * Return the output matrix of convolution operation
+ * between kernel and target
+ * */
+Matrix convolution(Matrix *kernel, Matrix *target) {
+	Matrix out;
+	int out_row_eff = target->row_eff - kernel->row_eff + 1;
+	int out_col_eff = target->col_eff - kernel->col_eff + 1;
+	
+	init_matrix(&out, out_row_eff, out_col_eff);
+
+	for (int i = 0; i < out.row_eff; i++) {
+		for (int j = 0; j < out.col_eff; j++) {
+			out.mat[i][j] = supression_op(kernel, target, i, j);
+		}
+	}
+
+	return out;
+}
+
+
+/*
+ * Procedure merge_array
+ *
+ * Merges two subarrays of n with n[left..mid] and n[mid+1..right]
+ * to n itself, with n now ordered ascendingly
+ * */
+void merge_array(int *n, int left, int mid, int right) {
+	int n_left = mid - left + 1;
+	int n_right = right - mid;
+	int iter_left = 0, iter_right = 0, iter_merged = left;
+	int arr_left[n_left], arr_right[n_right];
+
+	for (int i = 0; i < n_left; i++) {
+		arr_left[i] = n[i + left];
+	}
+
+	for (int i = 0; i < n_right; i++) {
+		arr_right[i] = n[i + mid + 1];
+	}
+
+	while (iter_left < n_left && iter_right < n_right) {
+		if (arr_left[iter_left] <= arr_right[iter_right]) {
+			n[iter_merged] = arr_left[iter_left++];
+		} else {
+			n[iter_merged] = arr_right[iter_right++];
+		}
+		iter_merged++;
+	}
+
+	while (iter_left < n_left)  {
+		n[iter_merged++] = arr_left[iter_left++];
+	}
+	while (iter_right < n_right) {
+		n[iter_merged++] = arr_right[iter_right++];
+	} 
+}
+
+
+/* 
+ * Procedure merge_sort
+ *
+ * Sorts array n with merge sort algorithm
+ * */
+void merge_sort(int *n, int left, int right) {
+	if (left < right) {
+		int mid = left + (right - left) / 2;
+
+		merge_sort(n, left, mid);
+		merge_sort(n, mid + 1, right);
+
+		merge_array(n, left, mid, right);
+	}	
+}
+ 
+
+/* 
+ * Procedure print_array
+ *
+ * Prints all elements of array n of size to stdout
+ * */
+void print_array(int *n, int size) {
+	for (int i = 0; i < size; i++ ) printf("%d ", n[i]);
+	printf("\n");
+}
+
+
+/* 
+ * Function get_median
+ *
+ * Returns median of array n of length
+ * */
+int get_median(int *n, int length) {
+	int mid = length / 2;
+	if (length & 1) return n[mid];
+
+	return (n[mid - 1] + n[mid]) / 2;
+}
+
+
+/* 
+ * Function get_floored_mean
+ *
+ * Returns floored mean from an array of integers
+ * */
+long get_floored_mean(int *n, int length) {
+	long sum = 0;
+	for (int i = 0; i < length; i++) {
+		sum += n[i];
+	}
+
+	return sum / length;
+}
+
+/**
+ * Function index_to_row_major 
+ * 
+ * Returns the index of a matrix element in row-major order 
+ */
+int index_to_row_major(int row, int col, int row_eff, int col_eff) {
+	return row * col_eff + col;
+}
+
+// __device__ int d_index_to_row_major(int row, int col, int row_eff, int col_eff) {
+// 		return row * col_eff + col;
+// }
+
+/**
+ * Function row_major_to_index
+ * 
+ * Returns the row and column of a matrix element in row-major order
+ */
+void row_major_to_index(int index, int row_eff, int col_eff, int *row, int *col) {
+	*row = index / col_eff;
+	*col = index % col_eff;
+}
+
+// __device__ void d_row_major_to_index(int index, int row_eff, int col_eff, int *row, int *col) {
+// 		*row = index / col_eff;
+// 		*col = index % col_eff;
+// }
+
+/**
+ * Function map_matrix
+ * 
+ * Returns a row major matrix of the input matrix.
+ **/
+int* map_matrix(int mat[][100], int row, int col) {
+	int* map = (int*) malloc(sizeof(int) * row * col);
+	for (int i = 0; i < row; i++) {
+		for (int j = 0; j < col; j++) {
+			map[index_to_row_major(i, j, row, col)] = mat[i][j];
+		}
+	}
+	return map;
+}
+
+/*
+ * Function map_matrix_extended
+ * 
+ * Returns a row major matrix of the input matrix.
+ **/
+int* map_matrix_extended(int** mat, int row, int col) {
+	int* map = (int*) malloc(sizeof(int) * row * col);
+	for (int i = 0; i < row; i++) {
+		for (int j = 0; j < col; j++) {
+			map[index_to_row_major(i, j, row, col)] = mat[i][j];
+		}
+	}
+	return map;
+}
+
+
+/**
+ * Function reverse_map_matrix
+ * 
+ * Returns a matrix of the input row major matrix.
+ */
+int** reverse_map_matrix(int* map, int row, int col) {
+	int** mat = (int**) malloc(sizeof(int*) * row);
+	for (int i = 0; i < row; i++) {
+		mat[i] = (int*) malloc(sizeof(int) * col);
+		for (int j = 0; j < col; j++) {
+			mat[i][j] = map[index_to_row_major(i, j, row, col)];
+		}
+	}
+	return mat;
+}
+
+/**
+ * Function rm_to_matrix_object
+ * 
+ * Return Matrix struct of row major matrix
+ */
+Matrix rm_to_matrix_object(int* map, int row, int col) {
+	Matrix mat;
+	init_matrix(&mat, row, col);
+	for (int i = 0; i < row; i++) {
+		for (int j = 0; j < col; j++) {
+			mat.mat[i][j] = map[index_to_row_major(i, j, row, col)];
+		}
+	}
+	return mat;
+}
+
+/**
+ * Function rm_to_list_matrix_object
+ *
+ * Splits one contiguous row-major buffer into `row` Matrix structs of
+ * row_inner x col_inner elements each. `col` is the element stride
+ * between consecutive matrices inside `map` (expected to equal
+ * row_inner * col_inner for a tightly packed buffer).
+ * Returns a malloc'd array of Matrix; the caller owns it.
+ */
+Matrix* rm_to_list_matrix_object(int* map, int row, int col, int row_inner, int col_inner) {
+	Matrix* list = (Matrix*) malloc(sizeof(Matrix) * row);
+	for (int m = 0; m < row; m++) {
+		init_matrix(&list[m], row_inner, col_inner);
+		int base = m * col;  // offset of matrix m inside the flat buffer
+		for (int r = 0; r < row_inner; r++) {
+			for (int c = 0; c < col_inner; c++) {
+				list[m].mat[r][c] = map[base + index_to_row_major(r, c, row_inner, col_inner)];
+			}
+		}
+	}
+	return list;
+}
+/**
+ * Function list_matrix_object_to_rm
+ *
+ * Packs `num_matrix` Matrix structs (row_inner x col_inner each) into one
+ * contiguous row-major buffer, matrix after matrix.
+ * The caller owns and must free the returned buffer.
+ */
+int* list_matrix_object_to_rm(Matrix* mat, int num_matrix,  int row_inner, int col_inner) {
+	int per_mat = row_inner * col_inner;
+	int* flat = (int*) malloc(sizeof(int) * num_matrix * per_mat);
+	for (int m = 0; m < num_matrix; m++) {
+		int base = m * per_mat;  // start of matrix m in the flat buffer
+		for (int r = 0; r < row_inner; r++) {
+			for (int c = 0; c < col_inner; c++) {
+				flat[base + index_to_row_major(r, c, row_inner, col_inner)] = mat[m].mat[r][c];
+			}
+		}
+	}
+	return flat;
+}
+
+/**
+ * Function cuda_convolution
+ *
+ * Host-side (CPU) convolution of target matrix number `curr_mat`, taken
+ * from the packed row-major buffer arr_mat_rm, with the kernel kernel_rm.
+ * Writes the row_eff x col_eff result into out_mat_rm at the matching
+ * per-matrix offset. Note: despite the name, this runs entirely on the
+ * host; no device code is launched here.
+ */
+void cuda_convolution(int* out_mat_rm, int* arr_mat_rm, int* kernel_rm, int row_eff, int col_eff, int kernel_row, int kernel_col, int curr_mat) {
+	// Full (pre-convolution) dimensions of one input matrix.
+	int row = row_eff + kernel_row - 1;
+	int col = col_eff + kernel_col - 1;
+
+	// Offsets of this matrix inside the flat input/output buffers.
+	int in_base = curr_mat * row * col;
+	int out_base = curr_mat * row_eff * col_eff;
+
+	// Slide the kernel over every valid anchor position (i, j).
+	for (int i = 0; i < row_eff; i++) {
+		for (int j = 0; j < col_eff; j++) {
+			// Accumulate the elementwise product of the kernel with the
+			// input window anchored at (i, j).
+			int acc = 0;
+			for (int k = 0; k < kernel_row; k++) {
+				for (int l = 0; l < kernel_col; l++) {
+					int src = in_base + index_to_row_major(i + k, j + l, row, col);
+					int ker = index_to_row_major(k, l, kernel_row, kernel_col);
+					acc += arr_mat_rm[src] * kernel_rm[ker];
+				}
+			}
+			out_mat_rm[out_base + index_to_row_major(i, j, row_eff, col_eff)] = acc;
+		}
+	}
+}
+
+// __global__ void d_cuda_convolution(int* d_out_mat, int* arr_mat_rm, int* kernel_rm, int row_eff, int col_eff, int kernel_row, int kernel_col) {
+//  // Calculate real row and column of input matrix.
+// 	int row = row_eff + kernel_row - 1;
+// 	int col = col_eff + kernel_col - 1;
+// 
+//  // Determine current matrix from block;
+// 	int curr_mat = blockIdx.y;
+//
+//  // Calculate padding target and output matrix.
+// 	int pad = curr_mat * row * col;
+// 	int pad_out = curr_mat * row_eff * col_eff;
+// 
+//  // Get i, and j from threadIdx
+//  int tid = blockIdx.x * blockDim.x + threadIdx.x;
+//  int i, j;
+//  d_row_major_to_index(tid, row_eff, col_eff, &i, &j);
+// 
+//  // Calculate element in input matrix that is not on the boundary,
+// 	if (i < row_eff && j < col_eff) {
+// 		int intermediate_sum = 0;
+// 		for (int k = 0; k < kernel_row; k++) {
+// 			for (int l = 0; l < kernel_col; l++) {
+// 				int index = d_index_to_row_major(i + k, j + l, row, col) + pad;
+// 				int kernel_index = d_index_to_row_major(k, l, kernel_row, kernel_col);
+// 				intermediate_sum += arr_mat_rm[index] * kernel_rm[kernel_index];
+// 			}
+// 		}
+// 		d_out_mat[d_index_to_row_major(i, j, row_eff, col_eff) + pad_out] = intermediate_sum;
+// 	}
+// }
+
+
+// main() driver
+//
+// Reads a kernel matrix and a batch of target matrices from stdin,
+// convolves each target with the kernel (host-side path; the CUDA path
+// is kept commented out in this file), then prints the min, max, median
+// and floored mean of the per-matrix data ranges, plus the runtime.
+//
+// Fixes over the previous revision:
+//  - kernel_rm and arr_mat_rm were malloc'd and then immediately
+//    overwritten by buffers returned from map_matrix /
+//    list_matrix_object_to_rm, leaking the first allocation;
+//  - arr_mat was reassigned without freeing the original array;
+//  - sizeof(int*) was used where sizeof(int) was intended;
+//  - arr_range was a VLA (non-standard under C++/nvcc);
+//  - host buffers were never freed.
+int main() {
+	// Start wall-clock measurement (includes input parsing).
+	clock_t t = clock();
+
+	int kernel_row, kernel_col, target_row, target_col, num_targets;
+
+	// Read kernel dimensions and the kernel matrix from stdin.
+	scanf("%d %d", &kernel_row, &kernel_col);
+	Matrix kernel = input_matrix(kernel_row, kernel_col);
+
+	// Read the number of target matrices and their shared dimensions.
+	scanf("%d %d %d", &num_targets, &target_row, &target_col);
+	Matrix* arr_mat = (Matrix*)malloc(num_targets * sizeof(Matrix));
+	// One data-range value per target matrix (heap, not a VLA).
+	int* arr_range = (int*)malloc(num_targets * sizeof(int));
+
+	// Effective (post-convolution) output dimensions.
+	int row_eff = target_row - kernel_row + 1;
+	int col_eff = target_col - kernel_col + 1;
+
+	// rm means row-major: flattened copies used by the convolution routine.
+	int *arr_mat_rm, *out_mat_rm, *kernel_rm;
+
+	// Output buffer: one row_eff x col_eff result per target matrix.
+	int size_out_mat = num_targets * row_eff * col_eff;
+	out_mat_rm = (int*)malloc(sizeof(int) * size_out_mat);
+
+	// Kernel in row-major form. map_matrix allocates its own buffer,
+	// so no separate malloc is needed here.
+	kernel_rm = map_matrix(kernel.mat, kernel_row, kernel_col);
+
+	// Read each target matrix, then flatten all of them into one buffer.
+	for (int i = 0; i < num_targets; i++) {
+		arr_mat[i] = input_matrix(target_row, target_col);
+	}
+	// list_matrix_object_to_rm also allocates its own buffer.
+	arr_mat_rm = list_matrix_object_to_rm(arr_mat, num_targets, target_row, target_col);
+
+	// Convolve every target with the kernel (host-side reference path).
+	for (int i = 0; i < num_targets; i++) {
+		cuda_convolution(out_mat_rm, arr_mat_rm, kernel_rm, row_eff, col_eff, kernel_row, kernel_col, i);
+	}
+
+	// Rebuild Matrix objects from the flattened results; release the old
+	// array first so the reassignment does not leak it.
+	free(arr_mat);
+	arr_mat = rm_to_list_matrix_object(out_mat_rm, num_targets, row_eff * col_eff, row_eff, col_eff);
+
+	// Compute each result's data range, then sort the ranges.
+	for (int i = 0; i < num_targets; i++) {
+		arr_range[i] = get_matrix_datarange(&arr_mat[i]);
+	}
+	merge_sort(arr_range, 0, num_targets - 1);
+
+	int median = get_median(arr_range, num_targets);
+	int floored_mean = get_floored_mean(arr_range, num_targets);
+
+	// Print the min, max, median, and floored mean of the data ranges.
+	printf("%d\n%d\n%d\n%d\n",
+			arr_range[0],
+			arr_range[num_targets - 1],
+			median,
+			floored_mean);
+
+	// Release all host buffers.
+	free(arr_mat);
+	free(arr_mat_rm);
+	free(out_mat_rm);
+	free(kernel_rm);
+	free(arr_range);
+
+	// Print execution time in seconds.
+	t = clock() - t;
+	printf("\nRuntime: %f s\n", ((float)t) / CLOCKS_PER_SEC);
+
+	return 0;
+}
-- 
GitLab