hadoop-node web-ui: http://192.168.1.10:50070/
spark web-ui: http://192.168.1.10:8080/
spark的url: http://192.168.1.10:7077
1.创建一个scala项目:
2.配置构建产物(Artifacts),打包生成 jar 包:
3.编写如下 Scala 代码:
package testScala
import org.apache.spark.{SparkConf, SparkContext}
object test {

  /** Entry point: runs the parallelize demo (testApp1). */
  def main(args: Array[String]): Unit = {
    testApp1()
  }

  /** Demo 1: parallelizes a small list of strings and prints each element.
    *
    * NOTE: `println` inside `foreach` executes on the executors, so on a
    * real cluster the output appears in the executor stdout logs, not on
    * the driver console.
    */
  def testApp1(): Unit = {
    val conf = new SparkConf()
      .setMaster("spark://192.168.1.10:7077")
      .setAppName("app1")
      .set("spark.ui.enabled", "false")
    val sc = new SparkContext(conf)
    try {
      // Ship this application's jar to the executors so closures resolve.
      sc.addJar("/Users/wsy/Workspaces/study/out/artifacts/study_jar/study.jar")
      val helloWorld = sc.parallelize(List("Hello,World!", "Hello,Spark!", "Hello,BigData!"))
      helloWorld.foreach(println)
    } finally {
      sc.stop() // release cluster resources even if the job throws
    }
  }

  /** Demo 2: classic word count over a text file in HDFS. The result is
    * collected to the driver and printed there (safe only because the
    * distinct-word set is assumed small enough to fit in driver memory).
    */
  def testApp2(): Unit = {
    val conf = new SparkConf()
      .setMaster("spark://192.168.1.10:7077")
      .setAppName("app2")
    val sc = new SparkContext(conf)
    try {
      sc.addJar("/Users/wsy/Workspaces/study/out/artifacts/study_jar/study.jar")
      val file = sc.textFile("hdfs://192.168.1.10:19000/Texts.txt")
      val counts = file
        .flatMap(_.split(" "))
        .map(word => (word, 1))
        .reduceByKey(_ + _)
      counts.collect().foreach(println)
    } finally {
      sc.stop()
    }
  }
}
4.build
java -jar test.jar (注意: 直接 java -jar 运行需要 classpath 中包含 Spark 依赖; 推荐用 spark-submit --class testScala.test --master spark://192.168.1.10:7077 test.jar 提交)
5.成功输出:
https://jianye.hd.weibo.com/site/?id=1274
转载请注明:SuperIT » 最简单的spark实例