MapReduce | Secondary Sort

1. Requirements

Streamer data: sort by viewer count in descending order; when viewer counts are equal, sort by stream duration in descending order.

# Sample data

user id  viewer count  stream duration

团团 300 1000

小黑 200 2000

哦吼 400 7000

卢本伟 100 6000

八戒 250 5000

悟空 100 4000

唐僧 100 3000

# Expected result

哦吼 400 7000

团团 300 1000

八戒 250 5000

小黑 200 2000

卢本伟 100 6000

悟空 100 4000

唐僧 100 3000

2. Upload the data to HDFS
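
The exact commands depend on your cluster setup, but assuming the sample data is saved locally as sort3.txt (matching the input path used by the job code below), the upload could look like this:

```bash
# create the target directory and upload the input file
hdfs dfs -mkdir -p /mapreduce/demo6
hdfs dfs -put sort3.txt /mapreduce/demo6/sort3.txt
```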

3. Code (IntelliJ IDEA)

```java
package demo6;

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// Composite key holding both sort fields; WritableComparable drives the shuffle sort
public class PlayWritable implements WritableComparable<PlayWritable> {

    private int viewer;
    private int length;

    public PlayWritable() {
    }

    public PlayWritable(int viewer, int length) {
        this.viewer = viewer;
        this.length = length;
    }

    public int getViewer() {
        return viewer;
    }

    public void setViewer(int viewer) {
        this.viewer = viewer;
    }

    public int getLength() {
        return length;
    }

    public void setLength(int length) {
        this.length = length;
    }

    @Override
    public String toString() {
        return viewer + " " + length;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(viewer);
        out.writeInt(length);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        // Fields must be read back in the same order they were written
        this.viewer = in.readInt();
        this.length = in.readInt();
    }

    @Override
    public int compareTo(PlayWritable o) {
        // Primary sort: viewer count, descending
        if (this.viewer != o.viewer) {
            return this.viewer > o.viewer ? -1 : 1;
        }
        // Secondary sort: stream duration, descending
        return this.length > o.length ? -1 : (this.length == o.length ? 0 : 1);
    }
}
```

```java
package demo6;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

import java.io.IOException;

public class Sort3Job {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS","hdfs://hadoop10:8020");

        Job job = Job.getInstance(conf);
        job.setJarByClass(Sort3Job.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        TextInputFormat.addInputPath(job,new Path("/mapreduce/demo6/sort3.txt"));
        TextOutputFormat.setOutputPath(job,new Path("/mapreduce/demo6/out"));

        job.setMapperClass(Sort3Mapper.class);
        job.setReducerClass(Sort3Reducer.class);
        // key/value types emitted by the map stage
        job.setMapOutputKeyClass(PlayWritable.class);
        job.setMapOutputValueClass(Text.class);
        // key/value types emitted by the reducer
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(PlayWritable.class);

        boolean b = job.waitForCompletion(true);
        System.out.println(b);

    }
    static class Sort3Mapper extends Mapper<LongWritable, Text, PlayWritable, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // Input columns (tab-separated): name, viewer count, stream duration
            String[] arr = value.toString().split("\t");
            // Emit the composite key so the shuffle phase sorts records for us
            context.write(new PlayWritable(Integer.parseInt(arr[1]), Integer.parseInt(arr[2])), new Text(arr[0]));
        }
    }

    static class Sort3Reducer extends Reducer<PlayWritable, Text, Text, PlayWritable> {
        @Override
        protected void reduce(PlayWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            // Keys arrive already sorted; swap key and value so the name prints first
            for (Text name : values) {
                context.write(name, key);
            }
        }
    }
}
```
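
Because the ordering logic lives entirely in PlayWritable.compareTo, it can be sanity-checked locally before touching the cluster. Here is a minimal sketch (the CompareToCheck class name is just for illustration):

```java
package demo6;

import java.util.Arrays;

// Quick local check that PlayWritable sorts by viewers desc, then duration desc
public class CompareToCheck {
    public static void main(String[] args) {
        PlayWritable[] keys = {
                new PlayWritable(100, 6000),
                new PlayWritable(400, 7000),
                new PlayWritable(100, 3000),
                new PlayWritable(250, 5000)
        };
        Arrays.sort(keys); // uses PlayWritable.compareTo
        for (PlayWritable k : keys) {
            System.out.println(k); // expected: 400 7000, 250 5000, 100 6000, 100 3000
        }
    }
}
```

Note that the output directory (/mapreduce/demo6/out) must not already exist when the job starts, or Hadoop will refuse to run it.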

4. View the results in HDFS
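
Assuming the job wrote to /mapreduce/demo6/out as configured above, the sorted result can be inspected with something like:

```bash
# part-r-00000 is the default name of the single reducer's output file
hdfs dfs -cat /mapreduce/demo6/out/part-r-00000
```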


Please take good care of yourself~ I'd love to be friends with you~
