Hadoop+Idea+maven 遠程提交集群 mapreduce作業

環境準備

1 本地環境:win7(64位)、Intellij IDEA 2017.1.4
2 集群環境:Ambari集群,CentOS 6.5、hadoop2.7.3.共七個節點如下圖:


image.png

3 本文以最簡單的WordCount為例

配置及代碼編寫

1.首先將集群上的hadoop環境下載到本地,本文下載到


image.png

2.本地環境變量配置


image.png
  1. 去網上下載對應hadoop版本的hadoop.dll、winutils.exe,分別放到目錄“C:\Windows\System32”和“$HADOOP_HOME\bin”下。
    說明:hadoop.dll主要是防止插件報各種莫名錯誤,比如空對象引用,我本來以為intellij idea不需要安裝,結果被空指針錯誤拖了很久。
  2. 修改本地hosts文件。在目錄"C:\Windows\System32\drivers\etc"下


    image.png
  3. 在intellij idea下新建maven 項目


    image.png

    其中,gkd.xgs.yxm是我的package,里面是WordCount.java程序。
    resources是我新建的文件夾,里面需要存放hadoop集群中配置文件core-site.xml、mapred-site.xml、yarn-site.xml,此外,將log4j.properties文件也放在下面。
    resources目錄結構如上圖。
    其中core-site.xml、mapred-site.xml、yarn-site.xml,這幾個文件從集群上復制過來,但是Ambari集群有個問題這里需要注意一下。


    image.png

    這里需要加一個hdp.version,否則出錯。
    4. pom.xml配置

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>gkd.xgs.yxm</groupId>
    <artifactId>WordC</artifactId>
    <version>1.0-SNAPSHOT</version>
    <packaging>jar</packaging>

    <name>WordC</name>
    <url>http://maven.apache.org</url>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <!-- Keep the Hadoop client version in one place so it stays in sync
             with the 2.7.3 cluster; referenced by every hadoop-* dependency. -->
        <hadoop.version>2.7.3</hadoop.version>
    </properties>

    <!-- Mirror repositories tried in order before Maven Central. -->
    <repositories>
        <repository>
            <id>alimaven</id>
            <name>aliyun maven</name>
            <url>http://maven.aliyun.com/nexus/content/groups/public/</url>
            <releases>
                <enabled>true</enabled>
            </releases>
            <snapshots>
                <enabled>false</enabled>
            </snapshots>
        </repository>
        <repository>
            <id>oschina</id>
            <name>oschina maven</name>
            <url>http://maven.oschina.net/content/groups/public/</url>
        </repository>
        <repository>
            <id>central</id>
            <name>central maven</name>
            <url>http://repo1.maven.org/maven2/</url>
        </repository>
        <repository>
            <!-- FIX: repository ids must not contain spaces; "Akka repository"
                 is an invalid id that Maven rejects/warns about. -->
            <id>akka-repository</id>
            <url>http://repo.akka.io/releases</url>
        </repository>
        <repository>
            <id>hadoop-pcap</id>
            <url>http://dl.bintray.com/hadoop-pcap/hadoop-pcap</url>
        </repository>
    </repositories>

    <!-- Hadoop client artifacts; all versions pinned via ${hadoop.version}. -->
    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <!-- Builds the fat jar (WordC-1.0-SNAPSHOT-jar-with-dependencies.jar)
                 that mapreduce.job.jar points to during "mvn package". -->
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-assembly-plugin</artifactId>
                <version>2.5.5</version>
                <configuration>
                    <archive>
                        <manifest>
                            <addClasspath>true</addClasspath>
                            <classpathPrefix>lib/</classpathPrefix>
                            <mainClass>gkd.xgs.yxm.WordCount</mainClass>
                        </manifest>
                    </archive>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

</project>

5. 編寫mapreduce主體代碼。

package gkd.xgs.yxm;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordCount {
    public static class TokenizerMapper
            extends Mapper<Object, Text, Text, IntWritable> {  //4個類型分別是:輸入key類型、輸入value類型、輸出key類型、輸出value類型

        //MapReduce框架讀到一行數據侯以key value形式傳進來,
        // key默認情況下是mr框架所讀到一行文本的起始偏移量(Long類型),value默認情況下是mr框架所讀到的一行的數據內容(String類型)

        //這里的數據類型和我們常用的不一樣,因為MapReduce程序的輸出數據需要在不同機器間傳輸,所以必須是可序列化的,
        // 例如Long類型,Hadoop中定義了自己的可序列化類型LongWritable,String對應的是Text,int對應的是IntWritable。
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    //Reducer 4個類型分別指:輸入key的類型、輸入value的類型、輸出key的類型、輸出value的類型
    //這里需要注意的是,reduce方法接受的是:一個字符串類型的key、一個可迭代的數據集,因為reduce任務讀取到map
    //任務處理結果是這樣的:(good,1)(good,1)(good,1)(good,1)
    //當傳給reduce方法時,就變為:
    //key:good
    //value:(1,1,1,1)
    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        //創建配置對象
        Configuration conf = new Configuration();
        conf.set("mapreduce.app-submission.cross-platform", "true");
        conf.set("mapreduce.framework.name", "yarn");
        conf.set("mapreduce.job.jar","D:\\workspace\\WordC\\target\\WordC-1.0-SNAPSHOT-jar-with-dependencies.jar");
        String[] ioArgs = new String[] { "hdfs://rsct0:8020/input/test1.txt", "hdfs://rsct0:8020/output_test" };
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length < 2) {
            System.err.println("Usage: wordcount <in> [<in>...] <out>");
            System.exit(2);
        }
        //創建job對象
        Job job = Job.getInstance(conf, "word count");
        //設置運行job的類
        job.setJarByClass(WordCount.class);

        //設置map、combine、reduce處理類
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);

        //設置輸出類型
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        //設置輸入輸出目錄
        for (int i = 0; i < otherArgs.length - 1; ++i) {
            FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
        }
        FileOutputFormat.setOutputPath(job,
                new Path(otherArgs[otherArgs.length - 1]));
        //提交job
        boolean isSuccess = job.waitForCompletion(true);
        System.exit(isSuccess ? 0 : 1);
    }
}


maven打包

image.png

設置運行環境

image.png
最后編輯于
?著作權歸作者所有,轉載或內容合作請聯系作者
平臺聲明:文章內容(如有圖片或視頻亦包括在內)由作者上傳并發布,文章內容僅代表作者本人觀點,簡書系信息發布平臺,僅提供信息存儲服務。

推薦閱讀更多精彩內容