diff --git a/spark/clean.py b/spark/clean.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ab12d4a28b836fd8de050710a55c811c3c2509a
--- /dev/null
+++ b/spark/clean.py
@@ -0,0 +1,46 @@
+from pyspark.sql import SparkSession
+from pyspark.sql.functions import col, when, count
+
+spark = SparkSession.builder \
+    .appName("TelcoCustomerChurn") \
+    .getOrCreate()
+
+df = spark.read.csv("../data/TelcoCustomerChurn.csv", header=True, inferSchema=True)
+
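+# customerID is a unique row identifier with no predictive value, so drop it.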
+df = df.drop("customerID")
+
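+# TotalCharges is inferred as a string because it contains blank entries;
+# casting to double turns those blanks into nulls.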
+df = df.withColumn("TotalCharges", col("TotalCharges").cast("double"))
+
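+# Exploration (kept for reference): count nulls per column.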
+# missing_counts = df.select([count(when(col(c).isNull(), c)).alias(c) for c in df.columns])
+# missing_counts.show()
+
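+# Exploration (kept for reference): check whether the null TotalCharges rows are the tenure == 0 customers.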
+# tenure_zero_rows = df.filter(col("tenure") == 0)
+# tenure_zero_rows.show()
+
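+# New customers (tenure == 0) have no accumulated charges yet, so fill with 0.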
+df = df.fillna({"TotalCharges": 0})
+
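+# Re-check (kept for reference): no nulls should remain after the fill.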
+# missing_counts = df.select([count(when(col(c).isNull(), c)).alias(c) for c in df.columns])
+# missing_counts.show()
+
+# These service columns encode "No phone service" / "No internet service" as
+# separate categories that effectively mean "No"; collapse them into "No".
+columns_to_replace = [
+    "MultipleLines", "OnlineSecurity", "OnlineBackup",
+    "DeviceProtection", "TechSupport", "StreamingTV", "StreamingMovies"
+]
+
+for col_name in columns_to_replace:
+    df = df.withColumn(
+        col_name,
+        when(col(col_name).isin(["No phone service", "No internet service"]), "No")
+        .otherwise(col(col_name))
+    )
+
+# df.show()
\ No newline at end of file