Atguigu Big Data Technology: Hadoop (MapReduce) (New)
Chapter 7 MapReduce Extension Cases
7.2 TopN Case
1. Requirement
Process the output of Requirement 2.3 and emit the information of the 10 users with the highest total traffic usage.
(1) Input data (columns: phone number, upstream traffic, downstream traffic, total traffic)
13470253144 180 180 360
13509468723 7335 110349 117684
13560439638 918 4938 5856
13568436656 3597 25635 29232
13590439668 1116 954 2070
13630577991 6960 690 7650
13682846555 1938 2910 4848
13729199489 240 0 240
13736230513 2481 24681 27162
13768778790 120 120 240
13846544121 264 0 264
13956435636 132 1512 1644
13966251146 240 0 240
13975057813 11058 48243 59301
13992314666 3008 3720 6728
15043685818 3659 3538 7197
15910133277 3156 2936 6092
15959002129 1938 180 2118
18271575951 1527 2106 3633
18390173782 9531 2412 11943
84188413 4116 1432 5548
(2) Expected output
13509468723 7335 110349 117684
13975057813 11058 48243 59301
13568436656 3597 25635 29232
13736230513 2481 24681 27162
18390173782 9531 2412 11943
13630577991 6960 690 7650
15043685818 3659 3538 7197
13992314666 3008 3720 6728
15910133277 3156 2936 6092
13560439638 918 4938 5856
2. Requirement Analysis
The input already holds one aggregated record per phone number, so the job only needs to select the 10 records with the largest total traffic. Both the Mapper and the Reducer buffer records in a TreeMap keyed by FlowBean; because FlowBean.compareTo orders by sumFlow in descending order, the map stays sorted from largest to smallest total. Whenever the map grows beyond 10 entries, the smallest entry (lastKey) is dropped. Each map task emits its local top 10 in cleanup(), and the single reduce task merges those local lists into the global top 10.
3. Implementation Code
(1) Write the FlowBean class
package com.atguigu.mr.top;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class FlowBean implements WritableComparable<FlowBean> {

    private long upFlow;
    private long downFlow;
    private long sumFlow;

    // The no-argument constructor is required so the framework can create the bean via reflection
    public FlowBean() {
        super();
    }

    public FlowBean(long upFlow, long downFlow) {
        super();
        this.upFlow = upFlow;
        this.downFlow = downFlow;
    }

    // Serialization: write the fields in a fixed order
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    // Deserialization: read the fields in the same order they were written
    @Override
    public void readFields(DataInput in) throws IOException {
        upFlow = in.readLong();
        downFlow = in.readLong();
        sumFlow = in.readLong();
    }

    public long getUpFlow() { return upFlow; }
    public void setUpFlow(long upFlow) { this.upFlow = upFlow; }
    public long getDownFlow() { return downFlow; }
    public void setDownFlow(long downFlow) { this.downFlow = downFlow; }
    public long getSumFlow() { return sumFlow; }
    public void setSumFlow(long sumFlow) { this.sumFlow = sumFlow; }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }

    public void set(long downFlow2, long upFlow2) {
        downFlow = downFlow2;
        upFlow = upFlow2;
        sumFlow = downFlow2 + upFlow2;
    }

    // Sort in descending order of total traffic, so the largest record comes first
    @Override
    public int compareTo(FlowBean bean) {
        int result;
        if (this.sumFlow > bean.getSumFlow()) {
            result = -1;
        } else if (this.sumFlow < bean.getSumFlow()) {
            result = 1;
        } else {
            result = 0;
        }
        return result;
    }
}
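To make the TreeMap trick concrete, here is a minimal standalone sketch (my addition; the class TreeMapDemo and its helper newBean are hypothetical, not part of the tutorial) that inserts a few of the sample records and prints the ordering. It also exposes the one caveat of this compareTo: two records with equal sumFlow compare as equal, so the second put replaces the existing entry's value instead of adding a new entry.

package com.atguigu.mr.top;

import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.io.Text;

// Hypothetical demo class, not part of the tutorial's job code
public class TreeMapDemo {

    public static void main(String[] args) {
        TreeMap<FlowBean, Text> flowMap = new TreeMap<FlowBean, Text>();

        flowMap.put(newBean(918, 4938), new Text("13560439638"));    // sum 5856
        flowMap.put(newBean(7335, 110349), new Text("13509468723")); // sum 117684
        flowMap.put(newBean(240, 0), new Text("13729199489"));       // sum 240
        // Same sumFlow (240) as the previous record: compareTo returns 0,
        // so this put replaces the value of the existing entry instead of adding one
        flowMap.put(newBean(120, 120), new Text("13768778790"));

        // firstKey() holds the largest total (descending order), lastKey() the smallest
        System.out.println("largest:  " + flowMap.firstKey());
        System.out.println("smallest: " + flowMap.lastKey());

        // Trimming to a capacity of 2 drops the smallest totals,
        // exactly as the job does with its capacity of 10
        while (flowMap.size() > 2) {
            flowMap.remove(flowMap.lastKey());
        }
        for (Map.Entry<FlowBean, Text> entry : flowMap.entrySet()) {
            System.out.println(entry.getValue() + "\t" + entry.getKey());
        }
    }

    // Hypothetical helper mirroring how the mapper fills a bean
    private static FlowBean newBean(long upFlow, long downFlow) {
        FlowBean bean = new FlowBean(upFlow, downFlow);
        bean.setSumFlow(upFlow + downFlow);
        return bean;
    }
}

In this data set the tied records (the 240-byte totals) sit far below the top-10 cutoff, so the job's result is unaffected; a comparator with a secondary tie-breaker would avoid the collision in general.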
(2) Write the TopNMapper class
package com.atguigu.mr.top;

import java.io.IOException;
import java.util.Iterator;
import java.util.TreeMap;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class TopNMapper extends Mapper<LongWritable, Text, FlowBean, Text> {

    // Use a TreeMap as the buffer: it keeps its keys sorted
    // (descending by sumFlow, thanks to FlowBean.compareTo)
    private TreeMap<FlowBean, Text> flowMap = new TreeMap<FlowBean, Text>();
    private FlowBean kBean;

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {

        kBean = new FlowBean();
        Text v = new Text();

        // 1 Read one line
        String line = value.toString();

        // 2 Split it on tabs
        String[] fields = line.split("\t");

        // 3 Fill the bean
        String phoneNum = fields[0];
        long upFlow = Long.parseLong(fields[1]);
        long downFlow = Long.parseLong(fields[2]);
        long sumFlow = Long.parseLong(fields[3]);

        kBean.setDownFlow(downFlow);
        kBean.setUpFlow(upFlow);
        kBean.setSumFlow(sumFlow);

        v.set(phoneNum);

        // 4 Add the record to the TreeMap
        flowMap.put(kBean, v);

        // 5 Cap the TreeMap at 10 entries: once it grows beyond 10, drop the
        // record with the smallest total traffic (lastKey, because the order is descending)
        if (flowMap.size() > 10) {
            flowMap.remove(flowMap.lastKey());
        }
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // 6 Emit this map task's local top 10
        Iterator<FlowBean> bean = flowMap.keySet().iterator();
        while (bean.hasNext()) {
            FlowBean k = bean.next();
            context.write(k, flowMap.get(k));
        }
    }
}
(3) Write the TopNReducer class
package com.atguigu.mr.top;

import java.io.IOException;
import java.util.Iterator;
import java.util.TreeMap;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class TopNReducer extends Reducer<FlowBean, Text, Text, FlowBean> {

    // Use a TreeMap as the buffer: it keeps its keys sorted
    // (descending by sumFlow, thanks to FlowBean.compareTo)
    TreeMap<FlowBean, Text> flowMap = new TreeMap<FlowBean, Text>();

    @Override
    protected void reduce(FlowBean key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {

        for (Text value : values) {
            // The framework reuses the key and value objects between iterations,
            // so copy them before storing them in the TreeMap
            FlowBean bean = new FlowBean();
            bean.set(key.getDownFlow(), key.getUpFlow());

            // 1 Add the record to the TreeMap
            flowMap.put(bean, new Text(value));

            // 2 Cap the TreeMap at 10 entries: once it grows beyond 10,
            // drop the record with the smallest total traffic
            if (flowMap.size() > 10) {
                flowMap.remove(flowMap.lastKey());
            }
        }
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // 3 Emit the global top 10
        Iterator<FlowBean> it = flowMap.keySet().iterator();
        while (it.hasNext()) {
            FlowBean v = it.next();
            context.write(new Text(flowMap.get(v)), v);
        }
    }
}
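Because the shuffle already hands keys to the reducer sorted by FlowBean.compareTo (largest sumFlow first), the reduce side does not strictly need its own TreeMap: counting emitted records and stopping at 10 gives the same result. A minimal alternative sketch, my addition rather than part of the tutorial (the class name TopNReducerAlt is made up):

package com.atguigu.mr.top;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Hypothetical variant: relies on the shuffle's descending key order
// instead of buffering records in a TreeMap
public class TopNReducerAlt extends Reducer<FlowBean, Text, Text, FlowBean> {

    private int count = 0;

    @Override
    protected void reduce(FlowBean key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        // Keys arrive sorted by descending sumFlow, so the first 10 records
        // seen across all reduce() calls are the global top 10
        for (Text value : values) {
            if (count < 10) {
                // context.write serializes immediately, so reusing key is safe
                context.write(value, key);
                count++;
            }
        }
    }
}

Like the TreeMap version, this relies on the job running with a single reduce task; unlike it, records tied on sumFlow are kept rather than overwritten.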
(4) Write the TopNDriver class
package com.atguigu.mr.top;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class TopNDriver {

    public static void main(String[] args) throws Exception {

        // Hard-coded input/output paths for local testing
        // (the input is the output directory of Requirement 2.3)
        args = new String[]{"e:/output1", "e:/output3"};

        // 1 Get the configuration and the Job instance
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);

        // 2 Set the local path of the jar containing this program
        job.setJarByClass(TopNDriver.class);

        // 3 Set the Mapper and Reducer classes used by this job
        job.setMapperClass(TopNMapper.class);
        job.setReducerClass(TopNReducer.class);

        // 4 Set the key/value types of the mapper output
        job.setMapOutputKeyClass(FlowBean.class);
        job.setMapOutputValueClass(Text.class);

        // 5 Set the key/value types of the final output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // 6 Set the input and output directories
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7 Submit the job configuration and the jar containing the job's classes
        // to the framework (YARN or the local runner) and wait for completion
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
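One assumption worth making explicit: the global top 10 only comes out correctly if every map output reaches the same reduce task. A freshly created Job defaults to a single reduce task, so the driver above works as written; if the surrounding configuration might override that default, the driver can pin it. A one-line addition (placing it right after Job.getInstance is my suggestion, not part of the original):

// Guarantee a single reduce task so the top 10 is computed globally;
// with several reduce tasks, each output file would hold only a partial top 10
job.setNumReduceTasks(1);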