Hadoop: Serialization

1. Create a data directory under the project root, and add a log.txt file inside it (a sample of the expected format is shown after these steps).

2. Create a flow package under the example package.
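The original post does not show the contents of log.txt. The mapper in this example expects three whitespace-separated fields per line (phone number, upstream traffic, downstream traffic), so a made-up sample in that format could look like the listing below; the repeated phone number is there so the reducer's aggregation shows up in the output:

13736230513 2481 24681
13846544121 264 0
13736230513 1116 954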

FlowBean

package com.example.flow;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

//Hadoop serialization: implement Writable
//Three fields: phone number, upstream traffic, downstream traffic
public class FlowBean implements Writable {
    private String phone;
    private long upFlow;
    private long downFlow;
    public FlowBean(String phone, long upFlow, long downFlow) {
        this.phone = phone;
        this.upFlow = upFlow;
        this.downFlow = downFlow;
    }
    //Getters and setters
    public String getPhone() {
        return phone;
    }
    public void setPhone(String phone) {
        this.phone = phone;
    }
    public long getUpFlow() {
        return upFlow;
    }
    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }
    public long getDownFlow() {
        return downFlow;
    }
    //No-arg constructor, required by Hadoop to instantiate the bean during deserialization
    public FlowBean() {}
    //Total traffic = upstream + downstream
    public long getSumFlow(){
        return upFlow+downFlow;
    }

    //Serialization: the field order here must match readFields() below
    @Override
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeUTF(phone);
        dataOutput.writeLong(upFlow);
        dataOutput.writeLong(downFlow);
    }

    //Deserialization: read the fields back in the same order they were written
    @Override
    public void readFields(DataInput dataInput) throws IOException {
        phone = dataInput.readUTF();
        upFlow = dataInput.readLong();
        downFlow = dataInput.readLong();
    }
}
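As a quick sanity check outside of MapReduce, a Writable can be round-tripped through its own write()/readFields() pair. The following standalone main is my own sketch (the class name FlowBeanRoundTrip and the sample values are invented for illustration); it serializes a FlowBean into a byte array and rebuilds it:

package com.example.flow;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        FlowBean in = new FlowBean("13736230513", 2481L, 24681L);

        //write() pushes phone, upFlow, downFlow into the byte stream
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        in.write(new DataOutputStream(bytes));

        //readFields() must consume them in exactly the same order
        FlowBean out = new FlowBean();
        out.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        System.out.println(out.getPhone() + " " + out.getSumFlow()); //prints: 13736230513 27162
    }
}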

FlowDriver

package com.example.flow;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;


public class FlowDriver {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        //Create the job and record which jar carries the driver class
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(FlowDriver.class);

        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);

        //Map output types differ from the final output types, so both pairs must be set
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        //Final (reducer) output types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        //Read every file under data/ and write the results to output/
        FileInputFormat.setInputPaths(job, new Path("data"));
        FileOutputFormat.setOutputPath(job, new Path("output"));
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);

    }
}
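One practical note: waitForCompletion() fails with a FileAlreadyExistsException when output/ is left over from a previous run. A small optional guard, my addition rather than part of the original driver, deletes the stale directory before the output path is set (it needs one extra import, org.apache.hadoop.fs.FileSystem):

        //Optional: remove a stale output directory before submitting,
        //since FileOutputFormat refuses to overwrite an existing path
        Path output = new Path("output");
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(output)) {
            fs.delete(output, true); //true = delete recursively
        }
        FileOutputFormat.setOutputPath(job, output);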

FlowMapper

package com.example.flow;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

//1. Extend Mapper
//2. Override the map method
public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        //Debug output: print each raw input line to the console
        System.out.println(value);

        //1. Take one line of input and split it on whitespace:
        //   field 0 = phone number, field 1 = upstream traffic, field 2 = downstream traffic
        String[] split = value.toString().split("\\s+");
        String phone = split[0];
        long upFlow = Long.parseLong(split[1]);
        long downFlow = Long.parseLong(split[2]);
        //2. Wrap the parsed fields in a FlowBean and emit it, keyed by phone number
        FlowBean flowBean = new FlowBean(phone, upFlow, downFlow);
        context.write(new Text(phone), flowBean);
    }
}
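To make the data flow concrete: for a sample line such as 13736230513 2481 24681 (from the made-up data above), the mapper emits the key 13736230513 paired with a FlowBean holding upFlow=2481 and downFlow=24681. MapReduce then groups all FlowBeans that share a phone number before handing them to the reducer.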

FlowReducer

package com.example.flow;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

//1. Extend Reducer
//2. Override the reduce method
public class FlowReducer extends Reducer<Text,FlowBean,Text,Text> {
    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {

        //1. Iterate over every FlowBean for this phone number,
        //   summing the upstream and downstream traffic
        long upFlowSum = 0L;
        long downFlowSum = 0L;
        for (FlowBean flowBean : values) {
            upFlowSum += flowBean.getUpFlow();
            downFlowSum += flowBean.getDownFlow();
        }
        //2. Compute the grand total and emit a human-readable summary
        long sumFlow = upFlowSum + downFlowSum;
        String summary = String.format("total upstream: %d, total downstream: %d, total: %d",
                upFlowSum, downFlowSum, sumFlow);

        context.write(key, new Text(summary));
    }
}
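With the made-up sample data above, the job writes one line per phone number to output/part-r-00000, with the key and value separated by a tab (the TextOutputFormat default). The expected result would be:

13736230513	total upstream: 3597, total downstream: 25635, total: 29232
13846544121	total upstream: 264, total downstream: 0, total: 264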