Original article: http://zeppelin.apache.org/docs/0.7.2/interpreter/beam.html

Translation: http://cwiki.apachecn.org/pages/viewpage.action?pageId=10030766

Contributor: 片刻 (ApacheCN / Apache中文网)

Overview

Apache Beam is an open-source, unified platform for data processing pipelines. A pipeline is built using one of the Beam SDKs, and its execution is handled by a runner. Currently, Beam supports the Apache Flink Runner, the Apache Spark Runner, and the Google Dataflow Runner.
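The runner is selected in code through the pipeline options. A minimal sketch, assuming the Beam Java SDK and the relevant runner artifact are on the classpath (the class name RunnerChoice is illustrative):

import org.apache.beam.runners.direct.DirectRunner;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;

public class RunnerChoice {
  public static void main(String[] args) {
    PipelineOptions options = PipelineOptionsFactory.create();
    // Swap in FlinkRunner.class or SparkRunner.class to run the
    // same pipeline on Flink or Spark instead of in-process.
    options.setRunner(DirectRunner.class);
    Pipeline p = Pipeline.create(options);
    // ...apply transforms here...
    p.run();
  }
}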

How to use

Basically, you write ordinary Beam Java code in which you decide which Runner to use. You should write a main method inside a class, because the interpreter invokes this main to execute the pipeline. Unlike Zeppelin's normal paragraph model, each paragraph is treated as a separate job and has no relationship to any other paragraph; a skeleton is sketched below.
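Concretely, a Beam paragraph has the following shape (a minimal sketch; the class name MyJob is illustrative):

%beam

public class MyJob {
  public static void main(String[] args) {
    // Build the pipeline, choose a runner, and run it here;
    // the interpreter invokes this main when the paragraph executes.
  }
}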

Below is a word count demonstration in which the input data is represented as a string array. The pipeline can read its data from a file instead by replacing Create.of(SENTENCES).withCoder(StringUtf8Coder.of()) with TextIO.Read.from("path/to/filename.txt"); a sketch of that variant follows the example.

%beam

// most used imports
import org.apache.beam.sdk.coders.StringUtf8Coder;
import org.apache.beam.sdk.transforms.Create;
import java.io.Serializable;
import java.util.Arrays;
import java.util.List;
import java.util.ArrayList;
import org.apache.spark.api.java.*;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.SparkConf;
import org.apache.spark.streaming.*;
import org.apache.spark.SparkContext;
import org.apache.beam.runners.direct.*;
import org.apache.beam.sdk.runners.*;
import org.apache.beam.sdk.options.*;
import org.apache.beam.runners.spark.*;
import org.apache.beam.runners.spark.io.ConsoleIO;
import org.apache.beam.runners.flink.*;
import org.apache.beam.runners.flink.examples.WordCount.Options;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.Count;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.MapElements;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.transforms.SimpleFunction;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.options.PipelineOptions;

public class MinimalWordCount {
  // collects the formatted "word<TAB>count" rows produced by the pipeline
  static List<String> s = new ArrayList<>();

  static final String[] SENTENCES_ARRAY = new String[] {
    "Hadoop is the Elephant King!",
    "A yellow and elegant thing.",
    "He never forgets",
    "Useful data, or lets",
    "An extraneous element cling!",
    "A wonderful king is Hadoop.",
    "The elephant plays well with Sqoop.",
    "But what helps him to thrive",
    "Are Impala, and Hive,",
    "And HDFS in the group.",
    "Hadoop is an elegant fellow.",
    "An elephant gentle and mellow.",
    "He never gets mad,",
    "Or does anything bad,",
    "Because, at his core, he is yellow",
  };
  static final List<String> SENTENCES = Arrays.asList(SENTENCES_ARRAY);

  public static void main(String[] args) {
    Options options = PipelineOptionsFactory.create().as(Options.class);
    options.setRunner(FlinkRunner.class);
    Pipeline p = Pipeline.create(options);
    p.apply(Create.of(SENTENCES).withCoder(StringUtf8Coder.of()))
        .apply("ExtractWords", ParDo.of(new DoFn<String, String>() {
          @Override
          public void processElement(ProcessContext c) {
            // split each sentence into words, dropping empty tokens
            for (String word : c.element().split("[^a-zA-Z']+")) {
              if (!word.isEmpty()) {
                c.output(word);
              }
            }
          }
        }))
        .apply(Count.<String>perElement())
        .apply("FormatResults", ParDo.of(new DoFn<KV<String, Long>, String>() {
          @Override
          public void processElement(DoFn<KV<String, Long>, String>.ProcessContext arg0)
              throws Exception {
            s.add("\n" + arg0.element().getKey() + "\t" + arg0.element().getValue());
          }
        }));
    p.run();
    // print the rows through Zeppelin's %table display system
    System.out.println("%table word\tcount");
    for (int i = 0; i < s.size(); i++) {
      System.out.print(s.get(i));
    }
  }
}
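Note that the final println lines emit the result through Zeppelin's %table display system, which renders tab-separated columns and newline-separated rows as a table.

As mentioned above, the pipeline can read its input from a file rather than the in-memory array. The following is a minimal sketch of such a variant, reusing the imports from the example and the pre-2.0 TextIO API; both paths are placeholders, and writing the counts back out with TextIO.Write is an illustrative addition rather than part of the original example:

%beam

public class FileWordCount {
  public static void main(String[] args) {
    Options options = PipelineOptionsFactory.create().as(Options.class);
    options.setRunner(FlinkRunner.class);
    Pipeline p = Pipeline.create(options);
    p.apply(TextIO.Read.from("path/to/filename.txt")) // one element per input line
        .apply("ExtractWords", ParDo.of(new DoFn<String, String>() {
          @Override
          public void processElement(ProcessContext c) {
            for (String word : c.element().split("[^a-zA-Z']+")) {
              if (!word.isEmpty()) {
                c.output(word);
              }
            }
          }
        }))
        .apply(Count.<String>perElement())
        .apply("FormatResults", MapElements.via(new SimpleFunction<KV<String, Long>, String>() {
          @Override
          public String apply(KV<String, Long> kv) {
            return kv.getKey() + "\t" + kv.getValue();
          }
        }))
        .apply(TextIO.Write.to("path/to/output")); // one "word<TAB>count" line per element
    p.run();
  }
}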