cache pyspark
# Read the CSV file and mark the resulting DataFrame for caching
df_cache = spark.read.csv("data.csv", header=True, inferSchema=True)
# cache() is lazy; an action such as count() materializes the cache
df_cache.cache().count()
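For context, a minimal end-to-end sketch (assuming a local SparkSession and a sample data.csv with a header row): cache() only marks the DataFrame for storage, and the first action that scans it, such as count(), actually populates the cache.

from pyspark.sql import SparkSession

# Create (or reuse) a local SparkSession
spark = SparkSession.builder.appName("cache_example").getOrCreate()

# Read the CSV and mark the DataFrame for caching (lazy, nothing stored yet)
df_cache = spark.read.csv("data.csv", header=True, inferSchema=True)
df_cache.cache()

# The first action scans the data and fills the cache
print(df_cache.count())

# Subsequent actions reuse the cached data instead of re-reading the CSV
print(df_cache.filter(df_cache[df_cache.columns[0]].isNotNull()).count())

# Release the cached data when it is no longer needed
df_cache.unpersist()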