
Spark job submitted to YARN fails with an error

1. Error message

java.sql.SQLException: No suitable driver
	at java.sql.DriverManager.getDriver(DriverManager.java:315)
	at org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions$$anonfun$7.apply(JDBCOptions.scala:84)
	at org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions$$anonfun$7.apply(JDBCOptions.scala:84)
	at scala.Option.getOrElse(Option.scala:121)
	at org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions.<init>(JDBCOptions.scala:83)
	at org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions.<init>(JDBCOptions.scala:34)
	at org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider.createRelation(JdbcRelationProvider.scala:32)
	at org.apache.spark.sql.execution.datasources.DataSource.resolveRelation(DataSource.scala:306)
	at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:178)
	at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:146)
	at com.dataexa.cp.base.datasource.DataBasetoDF.convert(DataBasetoDF.scala:22)
	at com.dataexa.cp.base.datasource.DataSourceReader$$anonfun$getResult$1.apply(DataSourceReader.scala:63)
	at com.dataexa.cp.base.datasource.DataSourceReader$$anonfun$getResult$1.apply(DataSourceReader.scala:56)
	at scala.collection.Iterator$class.foreach(Iterator.scala:893)
	at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
	at scala.collection.MapLike$DefaultKeySet.foreach(MapLike.scala:174)
	at com.dataexa.cp.base.datasource.DataSourceReader.getResult(DataSourceReader.scala:56)
	at com.dataexa.cp.base.datasource.DataSourceReader$.main(DataSourceReader.scala:125)
	at com.dataexa.cp.base.datasource.DataSourceReader.main(DataSourceReader.scala)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
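
The top frame is the key one: DriverManager.getDriver(url) only returns a driver that has already been registered for that URL. The failure is easy to reproduce outside Spark; a minimal sketch, assuming the MySQL connector jar is absent from the classpath (the URL is a placeholder):

import java.sql.DriverManager

object NoSuitableDriverRepro {
  def main(args: Array[String]): Unit = {
    // With no MySQL driver registered for jdbc:mysql URLs, this throws
    // java.sql.SQLException: No suitable driver
    val driver = DriverManager.getDriver("jdbc:mysql://localhost:3306/test")
    println(driver.getClass.getName)
  }
}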

2. The code that triggers the error

case class DataBasetoDF(sparkSession: SparkSession){

  def convert(dataBase: DataBase): DataFrame = {

    // Note: no "driver" option is set here, which is what triggers the error above
    val dataFrame = sparkSession.read.format(dataBase.getDbType)
        .options(Map("url" -> dataBase.getUrl,
        "inferSchema" -> "true",
        "dbtable" -> dataBase.getTableName,
        "user" -> dataBase.getUsername,
        "password" -> dataBase.getPassword))
//      .option("inferSchema", "true")
//      .option("url", dataBase.getUrl)
//      .option("dbtable", dataBase.getTableName)
//      .option("user", dataBase.getUsername)
//      .option("password", dataBase.getPassword)
      .load()

    dataFrame
  }
}

The error was thrown while Spark was reading from a MySQL database.
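
Before touching the code, it can help to confirm which JDBC drivers the JVM has actually registered. A minimal diagnostic sketch, runnable on the Spark driver (e.g. in spark-shell):

import java.sql.DriverManager
import scala.collection.JavaConverters._

// Prints every driver currently registered with DriverManager;
// if no MySQL driver appears, "No suitable driver" is the expected failure
DriverManager.getDrivers.asScala.foreach(d => println(d.getClass.getName))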

Adding the driver option to the code fixed the problem. The colleague who wrote it claimed it wasn't needed; what a trap! (A likely explanation: without a "driver" key, Spark falls back to DriverManager.getDriver(url), which is exactly the getOrElse frame in the stack trace. On YARN the connector jar is often loaded by Spark's application classloader, which DriverManager does not see, so the job can work locally yet fail on the cluster; naming the class forces Spark to load and register it.)

case class DataBasetoDF(sparkSession: SparkSession){

  def convert(dataBase: DataBase): DataFrame = {

    val dataFrame = sparkSession.read.format(dataBase.getDbType)
        .options(Map("url" -> dataBase.getUrl,
        "inferSchema" -> "true",
        // the added option: name the JDBC driver class explicitly
        "driver" -> "com.mysql.jdbc.Driver",
        "dbtable" -> dataBase.getTableName,
        "user" -> dataBase.getUsername,
        "password" -> dataBase.getPassword))
//      .option("inferSchema", "true")
//      .option("url", dataBase.getUrl)
//      .option("dbtable", dataBase.getTableName)
//      .option("user", dataBase.getUsername)
//      .option("password", dataBase.getPassword)
      .load()

    dataFrame
  }
}
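
For reference, the same fix written with chained .option calls; a sketch that keeps the accessor names from the original code (com.mysql.jdbc.Driver is the Connector/J 5.x class name; Connector/J 8.x uses com.mysql.cj.jdbc.Driver instead):

val dataFrame = sparkSession.read
  .format("jdbc")
  .option("url", dataBase.getUrl)
  .option("dbtable", dataBase.getTableName)
  .option("user", dataBase.getUsername)
  .option("password", dataBase.getPassword)
  // Naming the driver class makes Spark load and register it explicitly,
  // instead of relying on DriverManager already knowing a driver for this URL
  .option("driver", "com.mysql.jdbc.Driver")
  .load()

The jar containing the driver still has to reach the cluster, typically via spark-submit's --jars option or by bundling it into the application jar; otherwise the executors will hit a driver-loading error of their own.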
