Hadoop Custom Serialization MapReduce in Practice
Posted by 阳光大男孩!
Preface
I had to laugh: a single MapReduce run took thirty seconds.
Custom Serialization
This example sums each phone number's upstream and downstream flow, implemented as a small MapReduce job.
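For reference, the input format is one record per line with space-separated fields: phone number, upstream flow, downstream flow. Using the sample values that appear in the code comments below, a record and its expected result row look like this:

input:  18833025128 100 200
output: 18833025128 100 200 300   (phone, up, down, up + down)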
Bean
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
//1 Implement the Writable interface
public class FlowBean implements Writable {
private long upFlow; // upstream flow
private long downFlow; // downstream flow
private long sumFlow; // total flow
//2 Provide a no-arg constructor (Hadoop instantiates the bean via reflection)
public FlowBean() {
}
//3 Provide getters and setters for the three fields
public long getUpFlow() {
return upFlow;
}
public void setUpFlow(long upFlow) {
this.upFlow = upFlow;
}
public long getDownFlow() {
return downFlow;
}
public void setDownFlow(long downFlow) {
this.downFlow = downFlow;
}
public long getSumFlow() {
return sumFlow;
}
public void setSumFlow(long sumFlow) {
this.sumFlow = sumFlow;
}
public void setSumFlow() {
this.sumFlow = this.upFlow + this.downFlow;
}
//4 Implement the serialization and deserialization methods; the field order must be identical in both
@Override
public void write(DataOutput dataOutput) throws IOException {
dataOutput.writeLong(upFlow);
dataOutput.writeLong(downFlow);
dataOutput.writeLong(sumFlow);
}
@Override
public void readFields(DataInput dataInput) throws IOException {
this.upFlow = dataInput.readLong();
this.downFlow = dataInput.readLong();
this.sumFlow = dataInput.readLong();
}
//5 Override toString()
@Override
public String toString() {
return upFlow + "\t" + downFlow + "\t" + sumFlow;
}
}
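Before wiring the bean into a job, it can help to sanity-check that write() and readFields() agree on field order. The following is a minimal standalone sketch (my addition, not part of the original job) that serializes a FlowBean to an in-memory buffer and reads it back:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
public class FlowBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        FlowBean in = new FlowBean();
        in.setUpFlow(100);
        in.setDownFlow(200);
        in.setSumFlow();
        // Serialize to an in-memory buffer, just as Hadoop would to the wire
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        in.write(new DataOutputStream(buffer));
        // Deserialize into a fresh bean; readFields() must consume fields in write() order
        FlowBean out = new FlowBean();
        out.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
        System.out.println(out); // expected: 100	200	300
    }
}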
Mapper: processes each line of input
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
// The output key
private Text outK = new Text();
// The output value
private FlowBean outV = new FlowBean();
@Override
protected void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
//1 Read the line and convert it to a String
// e.g. 18833025128 100 200
String line = value.toString();
//2 Split the line on spaces
String[] split = line.split(" ");
//3 Grab the fields we need: phone number, upstream flow, downstream flow
String phone = split[0];
String up = split[1];
String down = split[2];
//4 Populate outK and outV
outK.set(phone);
outV.setUpFlow(Long.parseLong(up));
outV.setDownFlow(Long.parseLong(down));
outV.setSumFlow(); // the sum is derived from the up and down fields just set
System.out.println("mapper.................done");
System.out.println(outV);
//5 Emit outK and outV
context.write(outK, outV);
}
}
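Note that map() as written assumes every line has exactly three space-separated fields and will throw on malformed input. If the data may be dirty, a defensive variant of the method looks like this (a sketch; the whitespace split, guard, and counter are my additions):

@Override
protected void map(LongWritable key, Text value, Context context)
        throws IOException, InterruptedException {
    String[] split = value.toString().split("\\s+"); // tolerate runs of whitespace
    if (split.length < 3) {
        // Count and skip malformed rows instead of failing the task
        context.getCounter("flow", "malformed_lines").increment(1);
        return;
    }
    outK.set(split[0]);
    outV.setUpFlow(Long.parseLong(split[1]));
    outV.setDownFlow(Long.parseLong(split[2]));
    outV.setSumFlow();
    context.write(outK, outV);
}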
Reducer: aggregates the Mapper's results
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
public class FlowReducer extends Reducer<Text,FlowBean, Text,FlowBean> {
@Override
protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
// For a given key, accumulate across all of its values
FlowBean flowBean = new FlowBean();
long upFlow = 0;
long downFlow = 0;
long sumFlow = 0;
for (FlowBean f:values){
upFlow +=f.getUpFlow();
downFlow +=f.getDownFlow();
sumFlow +=f.getSumFlow();
}
flowBean.setUpFlow(upFlow);
flowBean.setDownFlow(downFlow);
flowBean.setSumFlow(sumFlow);
System.out.println("reducer................结束");
System.out.println(flowBean);
context.write(key,flowBean);
}
}
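Because this aggregation is associative and the reducer's input and output types are identical (Text, FlowBean), the same class can optionally be reused as a combiner to pre-aggregate on the map side and shrink the shuffle. One extra line in the driver below enables it (an optional optimization, not in the original post):

// Optional: pre-aggregate each mapper's output before the shuffle
job.setCombinerClass(FlowReducer.class);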
Driver: the driver class that configures and submits the job
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class FlowDriver {
// Addresses and paths are hardcoded here so the parameters are easy to see; in real development pass them in from outside
private static final String HDFS_URL = "hdfs://192.168.56.80:9000";
private static final String HADOOP_USER_NAME = "root";
// Package the jar and run it on the server: hadoop jar hadoop-word-count-1.0.jar com.heibaiying.flow.FlowDriver
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
// Get a Job instance
Configuration configuration = new Configuration();
System.setProperty("HADOOP_USER_NAME", HADOOP_USER_NAME);
// Point at the HDFS address
configuration.set("fs.defaultFS", HDFS_URL);
Job job = Job.getInstance(configuration);
// Set the jar by the driver class
job.setJarByClass(FlowDriver.class);
// Wire up the Mapper and Reducer
job.setMapperClass(FlowMapper.class);
job.setReducerClass(FlowReducer.class);
// Set the Mapper's output key and value types
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(FlowBean.class);
// Set the final output key and value types
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(FlowBean.class);
FileInputFormat.setInputPaths(job, new Path("/test/text.txt"));
// Note: the output path must not already exist, or the job will fail
FileOutputFormat.setOutputPath(job, new Path("/test/text1.txt"));
boolean b = job.waitForCompletion(true);
System.out.println(b);
}
}
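As the comment in the driver notes, the hardcoded paths can be replaced with external parameters. A common pattern is Hadoop's Tool/ToolRunner; the sketch below (the class name FlowDriverTool and the args-based paths are illustrative assumptions, not from the original post) performs the same job configuration but takes input and output paths from the command line:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class FlowDriverTool extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        // getConf() carries any -D options passed on the command line
        Job job = Job.getInstance(getConf());
        job.setJarByClass(FlowDriverTool.class);
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);
        // Input and output paths come from the command line instead of being hardcoded
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        return job.waitForCompletion(true) ? 0 : 1;
    }
    public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new Configuration(), new FlowDriverTool(), args));
    }
}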
Testing
Prepare a file text.txt with the content 18888888 100 200 and upload it to HDFS. Package the job and run it on the Hadoop cluster:
hadoop jar hadoop-word-count-1.0.jar com.heibaiying.flow.FlowDriver
The output rows are the custom bean, formatted by its toString() method.
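With that single input line, the part-r-00000 file in the output directory should contain one tab-separated row (the sum column follows from the code above):

18888888	100	200	300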