This is the Scala version, using the MySQL JDBC driver from Java. A Java version would be even easier to adapt; the principle is exactly the same.
[mw_shl_code=scala,true]import java.sql.{Connection, DriverManager, PreparedStatement}

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

// No Class.forName("com.mysql.jdbc.Driver") needed: JDBC 4+ drivers register
// themselves with DriverManager via the ServiceLoader mechanism.
object SparkStreamingForPartition {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("NetCatWordCount")
    conf.setMaster("local[3]")
    val ssc = new StreamingContext(conf, Seconds(5))
    val dstream = ssc.socketTextStream("hadoopMaster", 9999)
      .flatMap(_.split(" "))
      .map(x => (x, 1))
      .reduceByKey(_ + _)
    dstream.foreachRDD { rdd =>
      // Runs on the executors, once per partition, so a single connection
      // is opened per partition instead of one per record.
      def func(records: Iterator[(String, Int)]): Unit = {
        var conn: Connection = null
        var stmt: PreparedStatement = null
        try {
          val url = "jdbc:mysql://hadoopMaster:3306/streaming"
          val user = "root"
          val password = "hadoop"
          conn = DriverManager.getConnection(url, user, password)
          // Prepare the statement once and reuse it for every record;
          // preparing it inside the loop would leak one statement per record.
          stmt = conn.prepareStatement("insert into wordcounts values (?, ?)")
          records.foreach { case (word, count) =>
            stmt.setString(1, word)
            stmt.setInt(2, count)
            stmt.executeUpdate()
          }
        } catch {
          case e: Exception => e.printStackTrace()
        } finally {
          if (stmt != null) stmt.close()
          if (conn != null) conn.close()
        }
      }
      val repartitionedRDD = rdd.repartition(3)
      repartitionedRDD.foreachPartition(func)
    }
    ssc.start()
    ssc.awaitTermination()
  }
}[/mw_shl_code]
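One caveat: the code above still opens and closes a MySQL connection for every partition of every 5-second batch. The Spark Streaming programming guide suggests going one step further with a lazily created, static connection pool on each executor, combined with batched writes. Below is a minimal sketch of that idea; `ConnectionPool` is a hypothetical helper built on plain `DriverManager` just for illustration, and in practice a real pooling library (HikariCP, DBCP, etc.) would take its place.

[mw_shl_code=scala,true]import java.sql.{Connection, DriverManager}
import scala.collection.mutable

// Hypothetical per-JVM pool: connections are created lazily and handed back
// for reuse, so each executor keeps them alive across micro-batches.
object ConnectionPool {
  private val pool = mutable.Queue.empty[Connection]

  def getConnection(): Connection = synchronized {
    if (pool.nonEmpty) pool.dequeue()
    else DriverManager.getConnection(
      "jdbc:mysql://hadoopMaster:3306/streaming", "root", "hadoop")
  }

  def returnConnection(conn: Connection): Unit = synchronized {
    pool.enqueue(conn)
  }
}

// `dstream` is the word-count DStream from the listing above.
dstream.foreachRDD { rdd =>
  rdd.foreachPartition { records =>
    val conn = ConnectionPool.getConnection()
    val stmt = conn.prepareStatement("insert into wordcounts values (?, ?)")
    try {
      records.foreach { case (word, count) =>
        stmt.setString(1, word)
        stmt.setInt(2, count)
        stmt.addBatch() // queue the row locally instead of a round trip per record
      }
      stmt.executeBatch() // a single round trip per partition
    } finally {
      stmt.close()
      ConnectionPool.returnConnection(conn) // return for reuse, don't close
    }
  }
}[/mw_shl_code]

Pooled connections survive across batches, and executeBatch cuts the per-record network round trips down to one per partition; both matter when the batch interval is as short as 5 seconds.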