doctor.txt (each line: doctor name, second-level department, specialty treated):
刘如恩,心血管内科,冠心病
刘生旺,消化内科,脂肪性肝病
韩岩,内科分泌,甲状腺疾病
王强,妇科内分泌,不孕不育与辅助助孕
马冬梅,产后妇科,妇科肿瘤疾病诊治
冯利,小儿消化科,小儿消化系统疾病
张中,小儿神经外科,神经导航下切除脑肿瘤
刘宇,小儿骨科,小儿脊柱及矫形外科
沈腾,创伤骨科,脊柱脊髓损伤
玛丽,骨关节科,股骨颈骨折
赵六,骨质疏松科,代谢性骨病的诊治
department.txt (each line: first-level department, keyword used for matching):
内科,内科
妇科,妇
小儿科,小儿
骨科,骨
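
A doctor is assigned to a first-level department when the doctor's second-level department name contains that department's keyword; the SQL predicate sec_dep like concat('%', keyword, '%') in the program below implements exactly this containment test. A quick check of the rule in the Scala REPL, using values taken from the files above:

// "心血管内科" contains "内科", so 刘如恩 falls under 内科;
// "小儿骨科" contains both "小儿" and "骨", so 刘宇 matches two first-level departments.
"心血管内科".contains("内科")   // true
"小儿骨科".contains("小儿")     // true
"小儿骨科".contains("骨")       // true
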
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

object Need3 {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName(this.getClass.getSimpleName)
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)
    import sqlContext.implicits._

    // Load the two raw text files.
    val doctorFile: RDD[String] = sc.textFile("D:\\djz\\20200204\\need3\\doctor.txt")
    val departmentFile: RDD[String] = sc.textFile("D:\\djz\\20200204\\need3\\department.txt")

    // doctor.txt: doctor name, second-level department, specialty.
    val doctorDF: DataFrame = doctorFile.map(line => {
      val split = line.split(",")
      (split(0), split(1), split(2))
    }).toDF("doctor", "sec_dep", "specialty")
    doctorDF.createTempView("doctors")

    // department.txt: first-level department, keyword used to match second-level departments.
    val departmentDF: DataFrame = departmentFile.map(line => {
      val split = line.split(",")
      (split(0), split(1))
    }).toDF("first_dep", "keyword")
    departmentDF.createTempView("department")

    // Cross join the two views and keep only the rows whose second-level
    // department name contains the first-level department's keyword.
    sqlContext.sql(
      "select doctor, sec_dep, specialty, first_dep from doctors CROSS JOIN department " +
        "where doctors.sec_dep like concat('%', keyword, '%')").show()

    sc.stop()
  }
}
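
The same result can also be produced with the DataFrame API instead of temporary views and an SQL string. A minimal sketch, assuming Spark 2.x where SparkSession and crossJoin are available (the object name Need3DataFrame is new; the file paths are the same as above):

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.col

object Need3DataFrame {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local").appName("Need3DataFrame").getOrCreate()
    import spark.implicits._

    // Parse the same comma-separated files into DataFrames.
    val doctors = spark.read.textFile("D:\\djz\\20200204\\need3\\doctor.txt")
      .map { line => val a = line.split(","); (a(0), a(1), a(2)) }
      .toDF("doctor", "sec_dep", "specialty")
    val departments = spark.read.textFile("D:\\djz\\20200204\\need3\\department.txt")
      .map { line => val a = line.split(","); (a(0), a(1)) }
      .toDF("first_dep", "keyword")

    // crossJoin + contains expresses the same rule as LIKE '%keyword%' in the SQL version.
    doctors.crossJoin(departments)
      .where(col("sec_dep").contains(col("keyword")))
      .select("doctor", "sec_dep", "specialty", "first_dep")
      .show()

    spark.stop()
  }
}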