MapReduce | 二次排序

1.需求

主播数据--按照观众人数降序排序,如果观众人数相同,按照直播时长降序

# 案例数据

用户id 观众人数 直播时长

团团 300 1000

小黑 200 2000

哦吼 400 7000

卢本伟 100 6000

八戒 250 5000

悟空 100 4000

唐僧 100 3000

# 期望结果

哦吼 400 7000

团团 300 1000

八戒 250 5000

小黑 200 2000

卢本伟 100 6000

悟空 100 4000

唐僧 100 3000

2.将数据上传到hdfs

3.Idea代码

java 复制代码
package demo6;

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class PlayWritable implements WritableComparable<PlayWritable> {

    private int viewer;
    private int length;

    public PlayWritable() {
    }

    public PlayWritable(int viewer, int length) {
        this.viewer = viewer;
        this.length = length;
    }

    public int getViewer() {
        return viewer;
    }

    public void setViewer(int viewer) {
        this.viewer = viewer;
    }

    public int getLength() {
        return length;
    }

    public void setLength(int length) {
        this.length = length;
    }

    @Override
    public String toString() {
        return viewer + " " + length;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(viewer);
        out.writeInt(length);

    }

    @Override
    public void readFields(DataInput in) throws IOException {
        this.viewer = in.readInt();
        this.length = in.readInt();

    }

    @Override
    public int compareTo(PlayWritable o) {
        if (this.viewer != o.viewer){
            return this.viewer > o.viewer ? -1 : 1;
        }
        return this.length > o.length ? -1 : (this.length == o.length ? 0 : 1);

    }
}
java 复制代码
package demo6;


import demo5.DescIntWritable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.checkerframework.checker.units.qual.Length;

import java.io.IOException;

public class Sort3Job {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS","hdfs://hadoop10:8020");

        Job job = Job.getInstance(conf);
        job.setJarByClass(Sort3Job.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        TextInputFormat.addInputPath(job,new Path("/mapreduce/demo6/sort3.txt"));
        TextOutputFormat.setOutputPath(job,new Path("/mapreduce/demo6/out"));

        job.setMapperClass(Sort3Mapper.class);
        job.setReducerClass(Sort3Reducer.class);
        //map输出的键与值类型
        job.setMapOutputKeyClass(PlayWritable.class);
        job.setMapOutputValueClass(Text.class);
        //reducer输出的键与值类型
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(PlayWritable.class);

        boolean b = job.waitForCompletion(true);
        System.out.println(b);

    }
    static class Sort3Mapper extends Mapper<LongWritable, Text, PlayWritable,Text> {
        @Override
        protected void map(LongWritable key, Text value,Context context) throws IOException, InterruptedException {
            String[] arr = value.toString().split("\t");
            context.write(new PlayWritable(Integer.parseInt(arr[1]),Integer.parseInt(arr[2])),new Text(arr[0]));
        }
    }

    static class Sort3Reducer extends Reducer<PlayWritable,Text,Text,PlayWritable>{
        @Override
        protected void reduce(PlayWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            for (Text name : values) {
                context.write(name,key);
            }
        }
    }
}

4.在hdfs查看结果


请好好爱自己~ 想和你做朋友~

相关推荐
woshiabc11133 分钟前
windows安装Elasticsearch及增删改查操作
大数据·elasticsearch·搜索引擎
lucky_syq1 小时前
Saprk和Flink的区别
大数据·flink
lucky_syq1 小时前
流式处理,为什么Flink比Spark Streaming好?
大数据·flink·spark
袋鼠云数栈1 小时前
深入浅出Flink CEP丨如何通过Flink SQL作业动态更新Flink CEP作业
大数据
清平乐的技术专栏1 小时前
Hive SQL 查询所有函数
hive·hadoop·sql
小白学大数据2 小时前
如何使用Selenium处理JavaScript动态加载的内容?
大数据·javascript·爬虫·selenium·测试工具
15年网络推广青哥3 小时前
国际抖音TikTok矩阵运营的关键要素有哪些?
大数据·人工智能·矩阵
节点。csn3 小时前
Hadoop yarn安装
大数据·hadoop·分布式
不惑_4 小时前
小白入门 · 腾讯云轻量服务器部署 Hadoop 3.3.6
服务器·hadoop·腾讯云
csding114 小时前
写入hive metastore报问题Permission denied: user=hadoop,inode=“/user/hive”
数据仓库·hive·hadoop