Closed nightscape closed 4 years ago
Hello, I'm trying to use this connector, but I am unable to compile it with sbt. I need this version because I am using Scala 2.12 — would you kindly help me?
@Samuman93 you need to be more specific about what's not working. Please describe what you were doing and what errors occurred.
I git cloned the repo, and when I run `sbt compile` or `sbt package` the console outputs:
```
$ sbt compile
[warn] No sbt.version set in project/build.properties, base directory: C:\mypath\spark\azure-sqldb-spark
[info] Loading global plugins from C:\mypath.sbt\1.0\plugins
[info] Set current project to azure-sqldb-spark (in build file:/C:/mypath/spark/azure-sqldb-spark/)
[info] Executing in batch mode. For better performance use sbt's shell
[info] Compiling 13 Scala sources and 4 Java sources to C:\mypath\spark\azure-sqldb-spark\target\scala-2.12\classes ...
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\LoggingTrait.scala:25:12: object slf4j is not a member of package org
[error] import org.slf4j.{Logger, LoggerFactory}
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\LoggingTrait.scala:31:33: not found: type Logger
[error] @transient private var log : Logger = null // scalastyle:ignore
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\LoggingTrait.scala:40:22: not found: type Logger
[error] protected def log: Logger = {
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\LoggingTrait.scala:43:14: not found: value LoggerFactory
[error] log = LoggerFactory.getLogger(logName)
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\config\Config.scala:26:12: object apache is not a member of package org
[error] import org.apache.spark.sql.SparkSession
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\config\Config.scala:27:12: object apache is not a member of package org
[error] import org.apache.spark.{SparkConf, SparkContext}
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\config\Config.scala:189:27: not found: type SparkContext
[error] def apply(sparkContext: SparkContext): Config = apply(sparkContext.getConf)
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\config\Config.scala:223:24: not found: type SparkConf
[error] def apply(sparkConf: SparkConf, options: collection.Map[String, String]): Config =
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\config\Config.scala:211:24: not found: type SparkConf
[error] def apply(sparkConf: SparkConf): Config = apply(sparkConf, Map.empty[String, String])
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\config\Config.scala:200:27: not found: type SparkSession
[error] def apply(sparkSession: SparkSession): Config = apply(sparkSession.sparkContext.getConf)
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\config\Config.scala:274:37: not found: type SparkConf
[error] def getOptionsFromConf(sparkConf: SparkConf): collection.Map[String, String] =
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\connect\package.scala:27:12: object apache is not a member of package org
[error] import org.apache.spark.sql.
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\connect\package.scala:67:76: not found: type Row
[error] implicit def toDataFrameFunctions[T](ds: Dataset[T]): DataFrameFunctions[Row] = DataFrameFunctions[Row](ds.toDF())
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\connect\package.scala:67:44: not found: type Dataset
[error] implicit def toDataFrameFunctions[T](ds: Dataset[T]): DataFrameFunctions[Row] = DataFrameFunctions[Row](ds.toDF())
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\connect\package.scala:55:51: not found: type DataFrameWriter
[error] implicit def toDataFrameWriterFunctions(writer: DataFrameWriter[]): DataFrameWriterFunctions =
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\connect\package.scala:43:51: not found: type DataFrameReader
[error] implicit def toDataFrameReaderFunctions(reader: DataFrameReader): DataFrameReaderFunctions =
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\connect\DataFrameFunctions.scala:32:12: object apache is not a member of package org
[error] import org.apache.spark.sql.{DataFrame, Row}
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\connect\DataFrameFunctions.scala:39:71: not found: type DataFrame
[error] private[spark] case class DataFrameFunctions[T](@transient dataFrame: DataFrame) extends LoggingTrait {
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\scala\com\microsoft\azure\sqldb\spark\connect\DataFrameFunctions.scala:97:59: not found: type Row
[error] private def bulkCopy(config: Config, iterator: Iterator[Row], metadata: BulkCopyMetadata): Unit = {
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\java\com\microsoft\azure\sqldb\spark\bulkcopy\SQLServerBulkDataFrameFileRecord.java:28:8: object apache is not a member of package org
[error] import org.apache.spark.sql.Row;
[error] ^
[error] C:\mypath\spark\azure-sqldb-spark\src\main\java\com\microsoft\azure\sqldb\spark\bulkcopy\SQLServerBulkDataFrameFileRecord.java:48:54: not found: type Row
[error] public SQLServerBulkDataFrameFileRecord(Iterator
```
Any news on this?
Thank you @nightscape for your contribution. Unfortunately, as this project is not actively maintained, we will not be able to merge this in. The newer connector here already uses SBT. It would be great if you can evaluate, use and hopefully contribute to that project. Closing this PR.
This PR replaces the Maven build with an SBT one in order to adhere to Scala standards and facilitate cross-building.