Clustering (Part 1): Feature ID Mapping with PySpark

  • The project calls for a simple clustering job; a rough estimate puts the sample count at around 20 million. The last time I wrote Spark code was back in 2014, and those model algorithms were all implemented in Java. This time I used PySpark to implement feature ID mapping, i.e., converting string-valued features into numeric IDs. This step is fairly common in model pipelines; the main point worth noting is the use of broadcast to ship the feature-to-ID mapping to the executors (a smaller standalone sketch of the broadcast lookup follows the full script below).
    #!/usr/bin/env python
    # -*- coding: utf-8 -*-
    from pyspark.conf import SparkConf
    from pyspark.context import SparkContext


    def parse(text):
        # text is a (byte offset, line) pair produced by newAPIHadoopFile;
        # each line looks like "id feat1,feat2,feat3"
        elems = text[1].split()
        if len(elems) != 2:
            return []
        return elems[1].split(',')


    def feat_to_id(text, feats_id_map):
        # Replace every feature string with its numeric id from the broadcast map
        elems = text[1].split()
        first = elems[0]
        feats = elems[1].split(',')
        feat_ids = [str(feats_id_map.value[feat]) for feat in feats]
        return first, ",".join(feat_ids)


    def feat_id_to_line(x, y):
        return x + "\t" + str(y)


    def norm_feats(input_path, output_samples_path, output_feats_map_path):
        """Feature ID mapping: convert the string features of each sample to numeric ids.

        @param input_path: input path of the samples; each line is "id feat1,feat2,feat3..."
        @param output_samples_path: output path for the samples, e.g. "id 2,3,4"
        @param output_feats_map_path: output path for the string -> id mapping file
        """
        conf = SparkConf().setMaster("local[*]")
        conf = conf.setAppName("bigdata-model-norm_feats")
        sc = SparkContext(conf=conf)
        sample_rdd = sc.newAPIHadoopFile(
            input_path,
            "org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
            "org.apache.hadoop.io.LongWritable",
            "org.apache.hadoop.io.Text")
        # Collect the distinct feature strings on the driver and number them from 1
        feats_list = sample_rdd.flatMap(lambda x: parse(x)).distinct().collect()
        feats_map = {}
        index = 1
        for feat in feats_list:
            feats_map[feat] = index
            index += 1
        # Broadcast the feature -> id map so every executor gets one read-only copy
        feats_map_lookup = sc.broadcast(feats_map)
        normed_sample_rdd = sample_rdd.map(lambda x: feat_to_id(x, feats_map_lookup))
        output_conf = {
            'mapreduce.output.fileoutputformat.compress': 'true',
            'mapreduce.output.fileoutputformat.compress.codec': 'org.apache.hadoop.io.compress.GzipCodec',
            'mapreduce.output.fileoutputformat.compress.type': 'BLOCK'
        }
        normed_sample_rdd.saveAsNewAPIHadoopFile(
            output_samples_path,
            'org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat',
            conf=output_conf)
        # Write the mapping as plain "feature\tid" lines into a single partition
        feats_map_rdd = sc.parallelize(list(feats_map.keys()), 1) \
            .map(lambda x: feat_id_to_line(x, feats_map_lookup.value[x]))
        feats_map_rdd.saveAsTextFile(output_feats_map_path)
        # feats_map_rdd.saveAsNewAPIHadoopFile(output_feats_map_path,
        #     'org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat',
        #     conf=output_conf)
        sc.stop()


    if __name__ == "__main__":
        input_path = "/tmp/samples.txt"
        output_samples_path = "/tmp/cluster/samples/"
        output_feats_map_path = "/tmp/cluster/feats/"
        norm_feats(input_path, output_samples_path, output_feats_map_path)
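  • For reference, below is a minimal standalone sketch (toy data, hypothetical ids, not part of the script above) of just the broadcast lookup step: the driver builds the feature-to-id dict once, broadcasts it, and each task then only reads `lookup.value` instead of having the dict shipped with every closure.

    from pyspark import SparkConf, SparkContext

    conf = SparkConf().setMaster("local[*]").setAppName("broadcast-lookup-demo")
    sc = SparkContext(conf=conf)

    # (offset, line) pairs, mimicking what newAPIHadoopFile yields for a text file
    samples = sc.parallelize([(0, "s1 a,b"), (7, "s2 b,c")])

    # Build the feature -> id dict on the driver, then broadcast it once
    feats = sorted(samples.flatMap(lambda x: x[1].split()[1].split(',')).distinct().collect())
    lookup = sc.broadcast({feat: idx for idx, feat in enumerate(feats, start=1)})

    def to_ids(record):
        sid, feat_str = record[1].split()
        return sid, ",".join(str(lookup.value[f]) for f in feat_str.split(','))

    print(samples.map(to_ids).collect())  # e.g. [('s1', '1,2'), ('s2', '2,3')]
    sc.stop()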
