# PySpark study notes: word count

# Word count, version 1: RDD API.
# Each input line looks like "Aaron,OperatingSystem,100"; every
# comma-separated field is treated as one word.
from pyspark import SparkConf,SparkContext

conf = SparkConf().setAppName("RddwordCount").setMaster("local[*]")
sc = SparkContext(conf=conf)

# Read the file as an RDD of lines, split into words, and count.
lines = sc.textFile("people.txt")
words = lines.flatMap(lambda line: line.split(","))
pairs = words.map(lambda word: (word, 1))
counts = pairs.reduceByKey(lambda a, b: a + b)
counts.foreach(print)  # print runs on workers; visible under local[*]
# Word count, version 2: DataFrame API.
from pyspark.sql import SparkSession
from pyspark.sql.functions import split,explode

spark = SparkSession.builder.getOrCreate()

# One row per line; split on "," and flatten into one row per word.
lines = spark.read.text("people.txt")
exploded = explode(split(lines.value, ","))
words = lines.select(exploded.alias("word"))
words.groupBy("word").count().show()
# Word count, version 3: Structured Streaming (socket or file source).
from pyspark.sql import SparkSession
from pyspark.sql.functions import split,explode

spark = SparkSession.builder.getOrCreate()

"""
# File-source variant. `path` must be a directory that Spark watches for
# newly arriving text files.
# NOTE(review): the original wrote .option("seq", "\n") — "seq" is not a
# recognized reader option; the intended key was presumably "sep". Fixed here.
lines = spark.readStream.format("text").option("sep", "\n").load(path)
words = lines.select(explode(split(lines.value, ",")).alias("word"))
"""

# Socket-source variant: stream lines from host "master", port 9000.
lines = spark.readStream.format("socket").option("host", "master").option("port", 9000).load()

# One row per whitespace-separated word.
words = lines.select(explode(split(lines.value, " ")).alias("word"))
wordCounts = words.groupBy("word").count()

# "complete" mode re-emits the full counts table on every trigger
# (every 8 seconds); awaitTermination() blocks until the query stops.
query = (
    wordCounts.writeStream
    .outputMode("complete")
    .format("console")
    .trigger(processingTime="8 seconds")
    .start()
)
query.awaitTermination()

# Related topics: spark, python