Using MapReduce

Create three classes: a Driver that configures and submits the job, a Mapper, and a Reducer:

package com.example.mapreduce;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class WordCountDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

        //Set the HDFS user name
        System.setProperty("HADOOP_USER_NAME", "root");
        //1. Create the Job object
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://hadoop100:8020");

        Job job = Job.getInstance(conf);
        //2. Associate the jar containing this Driver class
        job.setJarByClass(WordCountDriver.class);
        //3. Set the Mapper and Reducer classes
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        //4. Set the map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);
        //5. Set the final (job) output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        //6. Set the input and output paths (the commented lines are local-filesystem alternatives)
        //FileInputFormat.setInputPaths(job, new Path("E:\\cinput"));
        //FileOutputFormat.setOutputPath(job, new Path("E:\\output10"));
        FileInputFormat.setInputPaths(job, new Path("/cinput"));
        FileOutputFormat.setOutputPath(job, new Path("/output10"));

        //7. Submit the job and exit with its status
        System.exit(job.waitForCompletion(true) ? 0 : 1);

    }
}
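
One caveat worth noting: FileOutputFormat fails the job with a FileAlreadyExistsException if the output directory (/output10 above) already exists. A minimal sketch of clearing it before submission, using Hadoop's FileSystem API (add this to main() before step 6, plus an import of org.apache.hadoop.fs.FileSystem):

        //Delete a pre-existing output directory so reruns don't fail
        FileSystem fs = FileSystem.get(conf);
        Path outPath = new Path("/output10");
        if (fs.exists(outPath)) {
            fs.delete(outPath, true); // true = delete recursively
        }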

package com.example.mapreduce;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.io.Text;

import java.io.IOException;
//1. Extend Hadoop's Mapper class
//2. Override the map method
public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        //Split the line's text on spaces to get an array of words
        String[] words = value.toString().split(" ");
        //Emit each word as the key with a count of 1
        for (String word : words) {
            context.write(new Text(word), new LongWritable(1));
        }
    }
}
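
Because map() runs once per input line, allocating a fresh Text and LongWritable for every word creates avoidable garbage on large inputs. A common Hadoop idiom is to reuse writable instances, since context.write() serializes the key and value immediately; a sketch of the same mapper with reused objects:

public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    //Reused across calls; safe because write() copies the serialized bytes
    private final Text outKey = new Text();
    private final LongWritable one = new LongWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        for (String word : value.toString().split(" ")) {
            outKey.set(word);
            context.write(outKey, one);
        }
    }
}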

package com.example.mapreduce;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.io.Text;
import java.io.IOException;
//Extend Hadoop's Reducer class
//Override the reduce method
public class WordCountReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
        //Sum the counts in values for this key
        long sum = 0;
        for (LongWritable value : values) {
            sum += value.get();
        }
        //Write the aggregated result
        context.write(key, new LongWritable(sum));
    }
}
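
With the three classes in place, the job can be launched straight from the IDE (the Driver sets fs.defaultFS and HADOOP_USER_NAME for exactly this), or packaged and submitted on the cluster. The jar name below is illustrative; the input file is uploaded to the /cinput directory the Driver expects:

        hdfs dfs -mkdir -p /cinput
        hdfs dfs -put words.txt /cinput
        hadoop jar wordcount.jar com.example.mapreduce.WordCountDriver

Given an input file containing, say:

        hello world
        hello hadoop

the result in /output10/part-r-00000 is one tab-separated line per word, sorted by key:

        hadoop	1
        hello	2
        world	1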