```scala
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types.StructField
import org.apache.spark.sql.functions._

/** Unions an arbitrary number of DataFrames by column name, tolerating
  * differing schemas.
  *
  * Spark's `unionByName` requires identical column sets, so any column that is
  * missing from a given DataFrame is first added as a null literal *cast to the
  * column's declared type* — a bare `lit(null)` would be `NullType` and make the
  * union fail (or degrade the type) when the other side has a real type.
  *
  * @param dfs one or more DataFrames to union; must be non-empty
  * @return a single DataFrame containing all rows, with nulls in columns a
  *         source DataFrame did not have
  * @throws IllegalArgumentException if no DataFrames are supplied
  */
def unionByNameWithDefaultNull(dfs: DataFrame*): DataFrame = {
  require(dfs.nonEmpty, "unionByNameWithDefaultNull requires at least one DataFrame")

  // Collect the union of all fields, deduplicated by NAME (first occurrence
  // wins). A plain `distinct` on StructField keeps same-named fields that
  // differ only in nullability/metadata, which would later make `diff` re-add
  // an existing column and have withColumn silently null it out.
  val allFields: Seq[StructField] =
    dfs.flatMap(_.schema.fields).foldLeft(Vector.empty[StructField]) { (acc, f) =>
      if (acc.exists(_.name == f.name)) acc else acc :+ f
    }

  // Add every missing column as a typed null so all frames share one schema.
  def withMissingAsNull(df: DataFrame): DataFrame = {
    val present = df.schema.fieldNames.toSet
    allFields.filterNot(f => present(f.name)).foldLeft(df) { (acc, f) =>
      acc.withColumn(f.name, lit(null).cast(f.dataType))
    }
  }

  dfs.map(withMissingAsNull).reduce(_ unionByName _)
}
```