對機器學習會有很大的幫助:模型簡單、易用。
Apache Beam 的兩大特點
1、將數據的批處理(batch)和流處理(stream)編程范式進行了統一;
2、能夠在任何的執行引擎上運行。
它不僅為模型設計、更為執行一系列數據導向的工作流提供了統一的模型。這些工作流包括數據處理、吸收和整合。
新建maven項目
pom.xml加入依賴
<code>
<dependencies>
<dependency>
<groupId>org.apache.beam</groupId>
<artifactId>beam-sdks-java-core</artifactId>
<version>0.4.0</version>
</dependency>
<dependency>
<groupId>org.apache.beam</groupId>
<artifactId>beam-runners-direct-java</artifactId>
<version>0.4.0</version>
</dependency>
</dependencies>
</code>
測試類WordCount.java
<code>
package org.tom;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.*;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;
import java.io.Serializable;
public class WordCount implements Serializable{
private transient Pipeline pipeline = null;
public WordCount() {
PipelineOptions options = PipelineOptionsFactory.create();
options.setJobName("wordcount");
pipeline = Pipeline.create(options);
}
public void transform() {
PCollection<String> collection = pipeline.apply(TextIO.Read.from("file:///d:/tom/beam-test/src/main/resources/word.txt"));
PCollection<String> extractWords = collection.apply("ExtractWords", ParDo.of(new DoFn<String, String>() {
@ProcessElement
public void processElement(ProcessContext c) {
String[] split = c.element().split(" ");
for (String word : split) {
if (!word.isEmpty()) {
c.output(word);
}
}
}
}));
PCollection<KV<String, Long>> pCollection = extractWords.apply(Count.<String>perElement());
PCollection<String> formatResults = pCollection.apply("FormatResults", MapElements.via(new SimpleFunction<KV<String, Long>, String>() {
public String apply(KV<String, Long> input) {
return input.getKey() + ": " + input.getValue();
}
}));
formatResults.apply(TextIO.Write.to("D:\tom\beam-test\src\main\resources\wordcounts"));
}
public void run(){
pipeline.run().waitUntilFinish();
}
public static void main(String[] args) {
WordCount wordCount = new WordCount();
wordCount.transform();
wordCount.run();
}
}
</code>
統計文本\resources\word.txt
<code>
tom
hello
tom
luo
hello
tom
tom
word
word
word
tom
</code>
運行結果
word: 3
luo: 1
tom: 5
hello: 2
結果生成了兩個文件,這是由於 TextIO.Write 預設會將輸出依 hash 分片(sharding)寫成多個檔案(例如 wordcounts-00000-of-00002)。