{
  // 1. Read the CSV file; the first line is treated as the header row.
  val df1 = spark.read
    .option("header", "true")
    .csv(fileFullName)

  // 2. Sample input file contents. Note the header itself contains a comma
  //    ("Add,ress"), so the intended "Address" column is split into two
  //    columns, while the quoted value "med,abad" stays a single field:
  // ID,Name,Age,Add,ress,Salary
  // 1,Ross,32,Ah,"med,abad",2000
  // 2,Rachel,25,Delhi,,1500
  // 3,Chandler,23,Kota,,2000
  // 4,Monika,25,Mumbai,,6500
  // 5,Mike,27,Bhopal,,8500
  // 6,Phoebe,22,MP,,4500
  // 7,Joey,24,Indore,,10000

  // Print every row without truncating wide column values.
  df1.show(truncate = false)

  // 3. Resulting DataFrame:
  // +---+--------+---+------+--------+------+
  // |ID |Name    |Age|Add   |ress    |Salary|
  // +---+--------+---+------+--------+------+
  // |1  |Ross    |32 |Ah    |med,abad|2000  |
  // |2  |Rachel  |25 |Delhi |null    |1500  |
  // |3  |Chandler|23 |Kota  |null    |2000  |
  // |4  |Monika  |25 |Mumbai|null    |6500  |
  // |5  |Mike    |27 |Bhopal|null    |8500  |
  // |6  |Phoebe  |22 |MP    |null    |4500  |
  // |7  |Joey    |24 |Indore|null    |10000 |
  // +---+--------+---+------+--------+------+
}