MapReduce Data Cleaning

Posted by hemomo


Note: data cleaning usually only needs the Mapper; no Reducer is required.

The collected log data is stored in the file web.log. One log record looks like this:

101.206.68.147 - - [18/Sep/2018:20:05:16 +0000] "HEAD / HTTP/1.2" 200 20 "-" "DNSPod-Monitor/1.0"

Cleaning goal: remove log records with too few fields; only records with more than 11 space-separated fields are kept.
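To get a feel for the validity check, the following throwaway snippet (an illustration only, not part of the project code) splits the sample record above on spaces exactly as the Mapper does and prints the field count:

   // Split the sample record the same way the Mapper will
   String sample = "101.206.68.147 - - [18/Sep/2018:20:05:16 +0000] \"HEAD / HTTP/1.2\" 200 20 \"-\" \"DNSPod-Monitor/1.0\"";
   String[] fields = sample.split(" ");
   System.out.println(fields.length);   // 12, which is greater than 11, so this record is kept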

The code is as follows:

Project 1: Data Cleaning (Version 1)

Create a new package named com.scitc.clean.

1. Write the LogMapper class:

package com.scitc.clean;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class LogMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

   Text k = new Text();

   @Override
   protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
      // 1 Get one line of input
      String line = value.toString();
      // 2 Parse the log record
      boolean result = parseLog(line, context);
      // 3 Skip invalid records
      if (!result) {
         return;
      }
      // 4 Set the output key
      k.set(line);
      // 5 Write out the record
      context.write(k, NullWritable.get());
   }

   /**
    * Parse a log record and decide whether it is valid.
    * @param line    the log record
    * @param context the task context (used for counters)
    * @return true if the record is valid, false otherwise
    */
   private boolean parseLog(String line, Context context) {
      // 1 Split on spaces
      String[] fields = line.split(" ");
      // 2 Records with more than 11 fields are valid
      if (fields.length > 11) {
         // Custom counter for valid records
         context.getCounter("map", "true").increment(1);
         return true;
      } else {
         context.getCounter("map", "false").increment(1);
         return false;
      }
   }
}

2. Write the LogDriver class:

package com.scitc.clean;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class LogDriver {

   public static void main(String[] args) throws Exception {
      // Input and output paths, hardcoded for local testing
      args = new String[] { "E:/hadoop开发文件/input", "E:/hadoop开发文件/output" };
      // 1 Get the job instance
      Configuration conf = new Configuration();
      Job job = Job.getInstance(conf);
      // 2 Set the jar
      job.setJarByClass(LogDriver.class);
      // 3 Set the Mapper
      job.setMapperClass(LogMapper.class);
      // 4 Set the final output key/value types
      job.setOutputKeyClass(Text.class);
      job.setOutputValueClass(NullWritable.class);
      // Set the number of reduce tasks to 0 (map-only job)
      job.setNumReduceTasks(0);
      // 5 Set the input and output paths
      FileInputFormat.setInputPaths(job, new Path(args[0]));
      FileOutputFormat.setOutputPath(job, new Path(args[1]));
      // 6 Submit the job
      job.waitForCompletion(true);
   }
}

Local test:

Right-click the LogDriver class → Run As → Java Application.

The cleaned data can then be viewed in the output directory.

You can also package the job as a jar, upload it, and run it on the cluster, but remember to change the input and output paths (see the sketch below).
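For a cluster run, one common approach (a sketch only; the jar name clean.jar and the HDFS paths are hypothetical, and the same imports as LogDriver above are assumed) is to drop the hardcoded paths from main and pass them on the command line instead:

   public static void main(String[] args) throws Exception {
      // On the cluster, take the input and output paths from the command line, e.g.
      //   hadoop jar clean.jar com.scitc.clean.LogDriver /weblog/input /weblog/output
      if (args.length < 2) {
         System.err.println("Usage: LogDriver <input path> <output path>");
         System.exit(2);
      }
      Configuration conf = new Configuration();
      Job job = Job.getInstance(conf);
      job.setJarByClass(LogDriver.class);
      job.setMapperClass(LogMapper.class);
      job.setOutputKeyClass(Text.class);
      job.setOutputValueClass(NullWritable.class);
      job.setNumReduceTasks(0);
      FileInputFormat.setInputPaths(job, new Path(args[0]));
      FileOutputFormat.setOutputPath(job, new Path(args[1]));
      // Exit with a non-zero status if the job fails
      System.exit(job.waitForCompletion(true) ? 0 : 1);
   }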

 

Project 2: Data Cleaning (Version 2)

This version encapsulates the cleaned log data in a custom Bean object.

1. Write the UpLogBean class:

package com.scitc.clean;

public class UpLogBean {

   private String remote_addr;     // client IP address
   private String remote_user;     // client user name; "-" means the field is empty
   private String time_local;      // access time and time zone
   private String request;         // requested URL and HTTP protocol
   private String status;          // HTTP status code; 200 means success
   private String body_bytes_sent; // size of the response body sent to the client
   private String http_referer;    // page the request was linked from
   private String http_user_agent; // client browser information

   private boolean valid = true;   // whether the record is valid

   public String getRemote_addr() {
      return remote_addr;
   }
   public void setRemote_addr(String remote_addr) {
      this.remote_addr = remote_addr;
   }
   public String getRemote_user() {
      return remote_user;
   }
   public void setRemote_user(String remote_user) {
      this.remote_user = remote_user;
   }
   public String getTime_local() {
      return time_local;
   }
   public void setTime_local(String time_local) {
      this.time_local = time_local;
   }
   public String getRequest() {
      return request;
   }
   public void setRequest(String request) {
      this.request = request;
   }
   public String getStatus() {
      return status;
   }
   public void setStatus(String status) {
      this.status = status;
   }
   public String getBody_bytes_sent() {
      return body_bytes_sent;
   }
   public void setBody_bytes_sent(String body_bytes_sent) {
      this.body_bytes_sent = body_bytes_sent;
   }
   public String getHttp_referer() {
      return http_referer;
   }
   public void setHttp_referer(String http_referer) {
      this.http_referer = http_referer;
   }
   public String getHttp_user_agent() {
      return http_user_agent;
   }
   public void setHttp_user_agent(String http_user_agent) {
      this.http_user_agent = http_user_agent;
   }
   public boolean isValid() {
      return valid;
   }
   public void setValid(boolean valid) {
      this.valid = valid;
   }

   @Override
   public String toString() {
      // Join the fields with the "\001" (Ctrl-A) control character as the delimiter
      StringBuilder sb = new StringBuilder();
      sb.append(this.valid);
      sb.append("\001").append(this.remote_addr);
      sb.append("\001").append(this.remote_user);
      sb.append("\001").append(this.time_local);
      sb.append("\001").append(this.request);
      sb.append("\001").append(this.status);
      sb.append("\001").append(this.body_bytes_sent);
      sb.append("\001").append(this.http_referer);
      sb.append("\001").append(this.http_user_agent);
      return sb.toString();
   }
}
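As a quick check of the delimiter (a hypothetical snippet, not part of the project), you can populate a bean and print it; because \001 is an invisible control character, the snippet swaps it for "|" so the field boundaries can be seen:

   UpLogBean bean = new UpLogBean();
   bean.setRemote_addr("101.206.68.147");
   bean.setStatus("200");
   // Unset String fields print as "null"
   System.out.println(bean.toString().replace("\001", "|"));
   // prints: true|101.206.68.147|null|null|null|200|null|null|null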

2. Write the UpLogMapper class:

package com.scitc.clean;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class UpLogMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

   Text k = new Text();

   @Override
   protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
      // 1 Get one line of input
      String line = value.toString();
      // 2 Parse the log record and check whether it is valid
      UpLogBean bean = parseLog(line);
      if (!bean.isValid()) {
         return;
      }
      k.set(bean.toString());
      // 3 Write out the record
      context.write(k, NullWritable.get());
   }

   // Parse a log record into an UpLogBean
   private UpLogBean parseLog(String line) {
      UpLogBean logBean = new UpLogBean();
      // 1 Split on spaces
      String[] fields = line.split(" ");
      if (fields.length > 11) {
         // 2 Fill the bean
         logBean.setRemote_addr(fields[0]);
         logBean.setRemote_user(fields[1]);
         logBean.setTime_local(fields[3].substring(1)); // strip the leading "["
         logBean.setRequest(fields[6]);
         logBean.setStatus(fields[8]);
         logBean.setBody_bytes_sent(fields[9]);
         logBean.setHttp_referer(fields[10]);

         if (fields.length > 12) {
            logBean.setHttp_user_agent(fields[11] + " " + fields[12]);
         } else {
            logBean.setHttp_user_agent(fields[11]);
         }
         // Status codes of 400 or above indicate an HTTP error
         if (Integer.parseInt(logBean.getStatus()) >= 400) {
            logBean.setValid(false);
         }
      } else {
         logBean.setValid(false);
      }
      return logBean;
   }
}
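The array indices used by parseLog line up with the sample record shown at the beginning of the post as follows (an illustrative sketch, not part of the project code):

   String sample = "101.206.68.147 - - [18/Sep/2018:20:05:16 +0000] \"HEAD / HTTP/1.2\" 200 20 \"-\" \"DNSPod-Monitor/1.0\"";
   String[] f = sample.split(" ");
   System.out.println(f[0]);               // 101.206.68.147        -> remote_addr
   System.out.println(f[1]);               // -                     -> remote_user
   System.out.println(f[3].substring(1));  // 18/Sep/2018:20:05:16  -> time_local (leading "[" removed)
   System.out.println(f[6]);               // /                     -> request
   System.out.println(f[8]);               // 200                   -> status
   System.out.println(f[9]);               // 20                    -> body_bytes_sent
   System.out.println(f[10]);              // "-"                   -> http_referer
   System.out.println(f[11]);              // "DNSPod-Monitor/1.0"  -> http_user_agent (f.length is 12, so there is no f[12])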

3. Write the UpLogDriver class:

package com.scitc.clean;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class UpLogDriver {

   public static void main(String[] args) throws Exception {
      // Input and output paths, hardcoded for local testing
      args = new String[] { "E:/hadoop开发文件/input", "E:/hadoop开发文件/upoutput" };
      // 1 Get the job instance
      Configuration conf = new Configuration();
      Job job = Job.getInstance(conf);
      // 2 Set the jar
      job.setJarByClass(UpLogDriver.class);
      // 3 Set the Mapper
      job.setMapperClass(UpLogMapper.class);
      // 4 Set the final output key/value types
      job.setOutputKeyClass(Text.class);
      job.setOutputValueClass(NullWritable.class);
      // Set the number of reduce tasks to 0 (map-only job, same as in Project 1)
      job.setNumReduceTasks(0);
      // 5 Set the input and output paths
      FileInputFormat.setInputPaths(job, new Path(args[0]));
      FileOutputFormat.setOutputPath(job, new Path(args[1]));
      // 6 Submit the job
      job.waitForCompletion(true);
   }
}

Local test:

Right-click the UpLogDriver class → Run As → Java Application.

The cleaned data can then be viewed in the output directory.

You can also package the job as a jar, upload it, and run it on the cluster, but remember to change the input and output paths.
