MapReduce | Secondary Sort

1. Requirements

Streamer data: sort by viewer count in descending order; when viewer counts are tied, sort by stream duration in descending order.

# Sample data

user id  viewer count  stream duration
团团 300 1000
小黑 200 2000
哦吼 400 7000
卢本伟 100 6000
八戒 250 5000
悟空 100 4000
唐僧 100 3000

# Expected result

哦吼 400 7000
团团 300 1000
八戒 250 5000
小黑 200 2000
卢本伟 100 6000
悟空 100 4000
唐僧 100 3000
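MapReduce sorts map output by key, so the secondary sort is implemented by packing both fields into one custom WritableComparable key whose compareTo applies the two descending rules; the streamer name travels along as the value.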

2. Upload the data to HDFS
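For example, assuming the input file is saved locally as sort3.txt (matching the path the job below reads):

`hadoop fs -mkdir -p /mapreduce/demo6`
`hadoop fs -put sort3.txt /mapreduce/demo6/sort3.txt`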

3. Code (IntelliJ IDEA)

The composite key PlayWritable implements WritableComparable, so map output records are sorted on both fields during the shuffle:

```java
package demo6;

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class PlayWritable implements WritableComparable<PlayWritable> {

    private int viewer;
    private int length;

    public PlayWritable() {
    }

    public PlayWritable(int viewer, int length) {
        this.viewer = viewer;
        this.length = length;
    }

    public int getViewer() {
        return viewer;
    }

    public void setViewer(int viewer) {
        this.viewer = viewer;
    }

    public int getLength() {
        return length;
    }

    public void setLength(int length) {
        this.length = length;
    }

    @Override
    public String toString() {
        return viewer + " " + length;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        // Fields must be written in the same order readFields reads them back
        out.writeInt(viewer);
        out.writeInt(length);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        this.viewer = in.readInt();
        this.length = in.readInt();
    }

    @Override
    public int compareTo(PlayWritable o) {
        // Primary sort: viewer count, descending
        if (this.viewer != o.viewer) {
            return this.viewer > o.viewer ? -1 : 1;
        }
        // Secondary sort: stream duration, descending
        return this.length > o.length ? -1 : (this.length == o.length ? 0 : 1);
    }
}
```
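Before submitting a job, compareTo can be sanity-checked locally. A minimal sketch (the PlayWritableCheck class is hypothetical, not part of the job):

```java
package demo6;

import java.util.Arrays;

// Hypothetical local check for PlayWritable.compareTo; not required by the MapReduce job.
public class PlayWritableCheck {
    public static void main(String[] args) {
        PlayWritable[] records = {
                new PlayWritable(100, 6000),
                new PlayWritable(400, 7000),
                new PlayWritable(100, 3000),
        };
        Arrays.sort(records);              // sorts via PlayWritable.compareTo
        for (PlayWritable r : records) {
            System.out.println(r);         // expected: 400 7000, 100 6000, 100 3000
        }
    }
}
```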
The job driver with its mapper and reducer:

```java
package demo6;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

import java.io.IOException;

public class Sort3Job {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS","hdfs://hadoop10:8020");

        Job job = Job.getInstance(conf);
        job.setJarByClass(Sort3Job.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        TextInputFormat.addInputPath(job,new Path("/mapreduce/demo6/sort3.txt"));
        TextOutputFormat.setOutputPath(job,new Path("/mapreduce/demo6/out"));

        job.setMapperClass(Sort3Mapper.class);
        job.setReducerClass(Sort3Reducer.class);
        // key/value types of the map output
        job.setMapOutputKeyClass(PlayWritable.class);
        job.setMapOutputValueClass(Text.class);
        // key/value types of the reducer output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(PlayWritable.class);

        boolean b = job.waitForCompletion(true);
        System.out.println(b);

    }
    static class Sort3Mapper extends Mapper<LongWritable, Text, PlayWritable, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // Split on any whitespace so both tab- and space-separated lines parse
            String[] arr = value.toString().split("\\s+");
            // The composite (viewers, duration) key drives the shuffle sort; the name rides along as the value
            context.write(new PlayWritable(Integer.parseInt(arr[1]), Integer.parseInt(arr[2])), new Text(arr[0]));
        }
    }

    static class Sort3Reducer extends Reducer<PlayWritable, Text, Text, PlayWritable> {
        @Override
        protected void reduce(PlayWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            // Keys arrive already sorted; emit one "name viewers duration" line per streamer
            for (Text name : values) {
                context.write(name, key);
            }
        }
    }
}
```
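Because PlayWritable is the map output key, the shuffle sorts every record with compareTo before it reaches the reducer, and with the default single reducer the output file is globally ordered. To run it, package the project as a jar and submit it, for example with `hadoop jar demo6.jar demo6.Sort3Job` (the jar name here is illustrative).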

4. View the result on HDFS
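When the job prints true, the sorted output can be read with, for example, `hdfs dfs -cat /mapreduce/demo6/out/part-r-00000` (part-r-00000 is the default file name TextOutputFormat writes for a single reducer); it should match the expected result above.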


Please take good care of yourself~ I'd love to be friends with you~
