Row-based Operations

This page describes how to use row-based operations in the PyFlink Table API.

Map

Performs a map operation with a Python general scalar function or a vectorized scalar function. The output will be flattened if the output type is a composite type.

from pyflink.common import Row
from pyflink.table import EnvironmentSettings, TableEnvironment
from pyflink.table.expressions import col
from pyflink.table.types import DataTypes
from pyflink.table.udf import udf

env_settings = EnvironmentSettings.in_batch_mode()
table_env = TableEnvironment.create(env_settings)

table = table_env.from_elements([(1, 'Hi'), (2, 'Hello')], ['id', 'data'])

@udf(result_type=DataTypes.ROW([DataTypes.FIELD("id", DataTypes.BIGINT()),
                                DataTypes.FIELD("data", DataTypes.STRING())]))
def func1(id: int, data: str) -> Row:
    return Row(id, data * 2)

# the input columns are specified as the inputs
table.map(func1(col('id'), col('data'))).execute().print()

# result is
#+----------------------+--------------------------------+
#|                   id |                           data |
#+----------------------+--------------------------------+
#|                    1 |                           HiHi |
#|                    2 |                     HelloHello |
#+----------------------+--------------------------------+

The map operation also supports taking a Row object (containing all the columns of the input table) as input.

@udf(result_type=DataTypes.ROW([DataTypes.FIELD("id", DataTypes.BIGINT()),
                                DataTypes.FIELD("data", DataTypes.STRING())]))
def func2(data: Row) -> Row:
    return Row(data.id, data.data * 2)

# specify the function without the input columns
table.map(func2).execute().print()

# result is
#+----------------------+--------------------------------+
#|                   id |                           data |
#+----------------------+--------------------------------+
#|                    1 |                           HiHi |
#|                    2 |                     HelloHello |
#+----------------------+--------------------------------+

Note: The input columns should not be specified when using func2 in the map operation.
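For illustration, a minimal sketch (the commented-out call is hypothetical and shown only to highlight the mistake):

table.map(func2).execute().print()            # correct: pass the function itself
# table.map(func2(col('id'), col('data')))    # wrong: func2 expects a single Row,
#                                             # not individual columns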

A vectorized scalar function can also be used in the map operation. Note that in this case the input and output types must be pandas.DataFrame instead of Row.

import pandas as pd

@udf(result_type=DataTypes.ROW([DataTypes.FIELD("id", DataTypes.BIGINT()),
                                DataTypes.FIELD("data", DataTypes.STRING())]),
     func_type='pandas')
def func3(data: pd.DataFrame) -> pd.DataFrame:
    res = pd.concat([data.id, data.data * 2], axis=1)
    return res

table.map(func3).execute().print()

# result is
#+----------------------+--------------------------------+
#|                   id |                           data |
#+----------------------+--------------------------------+
#|                    1 |                           HiHi |
#|                    2 |                     HelloHello |
#+----------------------+--------------------------------+

FlatMap

Performs a flat_map operation with a Python table function.

from pyflink.common import Row
from pyflink.table.udf import udtf
from pyflink.table import DataTypes, EnvironmentSettings, TableEnvironment

env_settings = EnvironmentSettings.in_batch_mode()
table_env = TableEnvironment.create(env_settings)

table = table_env.from_elements([(1, 'Hi,Flink'), (2, 'Hello')], ['id', 'data'])

@udtf(result_types=[DataTypes.INT(), DataTypes.STRING()])
def split(x: Row) -> Row:
    for s in x.data.split(","):
        yield x.id, s

# use split in `flat_map`
table.flat_map(split).execute().print()

# result is
#+-------------+--------------------------------+
#|          f0 |                             f1 |
#+-------------+--------------------------------+
#|           1 |                             Hi |
#|           1 |                          Flink |
#|           2 |                          Hello |
#+-------------+--------------------------------+

The Python table function can also be used in join_lateral and left_outer_join_lateral.

# use table function in `join_lateral` or `left_outer_join_lateral`
table.join_lateral(split.alias('a', 'b')).execute().print()

# result is
#+----------------------+--------------------------------+-------------+--------------------------------+
#|                   id |                           data |           a |                              b |
#+----------------------+--------------------------------+-------------+--------------------------------+
#|                    1 |                       Hi,Flink |           1 |                             Hi |
#|                    1 |                       Hi,Flink |           1 |                          Flink |
#|                    2 |                          Hello |           2 |                          Hello |
#+----------------------+--------------------------------+-------------+--------------------------------+
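left_outer_join_lateral is used the same way, but keeps left rows for which the table function emits no output, padding the right side with NULL. A minimal sketch reusing split (for this input every row produces output, so the result matches join_lateral):

# keep left rows even when the table function emits nothing for them
table.left_outer_join_lateral(split.alias('a', 'b')).execute().print()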

Aggregate

Performs an aggregate operation with a Python general aggregate function or a vectorized aggregate function.

from pyflink.common import Row
from pyflink.table import DataTypes, EnvironmentSettings, TableEnvironment
from pyflink.table.expressions import col
from pyflink.table.udf import AggregateFunction, udaf

class CountAndSumAggregateFunction(AggregateFunction):

    def get_value(self, accumulator):
        return Row(accumulator[0], accumulator[1])

    def create_accumulator(self):
        return Row(0, 0)

    def accumulate(self, accumulator, row):
        accumulator[0] += 1
        accumulator[1] += row.b

    def retract(self, accumulator, row):
        accumulator[0] -= 1
        accumulator[1] -= row.b

    def merge(self, accumulator, accumulators):
        for other_acc in accumulators:
            accumulator[0] += other_acc[0]
            accumulator[1] += other_acc[1]

    def get_accumulator_type(self):
        return DataTypes.ROW(
            [DataTypes.FIELD("a", DataTypes.BIGINT()),
             DataTypes.FIELD("b", DataTypes.BIGINT())])

    def get_result_type(self):
        return DataTypes.ROW(
            [DataTypes.FIELD("a", DataTypes.BIGINT()),
             DataTypes.FIELD("b", DataTypes.BIGINT())])

function = CountAndSumAggregateFunction()
agg = udaf(function,
           result_type=function.get_result_type(),
           accumulator_type=function.get_accumulator_type(),
           name=str(function.__class__.__name__))

# aggregate with a python general aggregate function
env_settings = EnvironmentSettings.in_streaming_mode()
table_env = TableEnvironment.create(env_settings)

t = table_env.from_elements([(1, 2), (2, 1), (1, 3)], ['a', 'b'])
result = t.group_by(col('a')) \
    .aggregate(agg.alias("c", "d")) \
    .select(col('a'), col('c'), col('d'))
result.execute().print()

# the result is
#+----+----------------------+----------------------+----------------------+
#| op |                    a |                    c |                    d |
#+----+----------------------+----------------------+----------------------+
#| +I |                    1 |                    2 |                    5 |
#| +I |                    2 |                    1 |                    1 |
#+----+----------------------+----------------------+----------------------+

# aggregate with a python vectorized aggregate function
env_settings = EnvironmentSettings.in_batch_mode()
table_env = TableEnvironment.create(env_settings)

t = table_env.from_elements([(1, 2), (2, 1), (1, 3)], ['a', 'b'])
pandas_udaf = udaf(lambda pd: (pd.b.mean(), pd.b.max()),
                   result_type=DataTypes.ROW(
                       [DataTypes.FIELD("a", DataTypes.FLOAT()),
                        DataTypes.FIELD("b", DataTypes.INT())]),
                   func_type="pandas")
t.aggregate(pandas_udaf.alias("a", "b")) \
    .select(col('a'), col('b')).execute().print()

# the result is
#+--------------------------------+-------------+
#|                              a |           b |
#+--------------------------------+-------------+
#|                            2.0 |           3 |
#+--------------------------------+-------------+

Note: Similar to the map operation, if you specify the aggregate function without the input columns in the aggregate operation, it will take Row or pandas.DataFrame as input, containing all the columns of the input table including the grouping keys.

Note: You have to close the aggregate with a select statement, and the select statement should not contain aggregate functions. Besides, the output of aggregate will be flattened if it is a composite type.
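For illustration, a minimal sketch (the max_b function is hypothetical; it reuses t and the imports from the example above) of a pandas UDAF applied without input columns. The DataFrame it receives contains the grouping key a as well as b:

# hedged sketch: a pandas UDAF used without input columns; the DataFrame
# it receives contains every column of t, including the grouping key 'a'
max_b = udaf(lambda df: df.b.max(),        # df has columns 'a' and 'b'
             result_type=DataTypes.BIGINT(),
             func_type="pandas")
t.group_by(col('a')) \
    .aggregate(max_b.alias("max_b")) \
    .select(col('a'), col('max_b')) \
    .execute().print()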

FlatAggregate

Performs a flat_aggregate operation with a Python general Table Aggregate Function.

Similar to a GroupBy Aggregation, FlatAggregate groups the input on the grouping keys. Different from an AggregateFunction, a TableAggregateFunction can return 0, 1, or more records for a grouping key. Similar to aggregate, you have to close the flat_aggregate with a select statement, and the select statement should not contain aggregate functions.

from pyflink.common import Row
from pyflink.table import DataTypes, TableEnvironment, EnvironmentSettings
from pyflink.table.expressions import col
from pyflink.table.udf import udtaf, TableAggregateFunction

class Top2(TableAggregateFunction):

    def emit_value(self, accumulator):
        yield Row(accumulator[0])
        yield Row(accumulator[1])

    def create_accumulator(self):
        return [None, None]

    def accumulate(self, accumulator, row):
        if row.a is not None:
            if accumulator[0] is None or row.a > accumulator[0]:
                accumulator[1] = accumulator[0]
                accumulator[0] = row.a
            elif accumulator[1] is None or row.a > accumulator[1]:
                accumulator[1] = row.a

    def get_accumulator_type(self):
        return DataTypes.ARRAY(DataTypes.BIGINT())

    def get_result_type(self):
        return DataTypes.ROW(
            [DataTypes.FIELD("a", DataTypes.BIGINT())])

env_settings = EnvironmentSettings.in_streaming_mode()
table_env = TableEnvironment.create(env_settings)

# the result type and accumulator type can also be specified in the udtaf decorator:
# top2 = udtaf(Top2(), result_type=DataTypes.ROW([DataTypes.FIELD("a", DataTypes.BIGINT())]), accumulator_type=DataTypes.ARRAY(DataTypes.BIGINT()))
top2 = udtaf(Top2())
t = table_env.from_elements([(1, 'Hi', 'Hello'),
                             (3, 'Hi', 'hi'),
                             (5, 'Hi2', 'hi'),
                             (7, 'Hi', 'Hello'),
                             (2, 'Hi', 'Hello')],
                            ['a', 'b', 'c'])

# call function "inline" without registration in Table API
t.group_by(col('b')).flat_aggregate(top2).select(col('*')).execute().print()

# the result is:
#+----+--------------------------------+----------------------+
#| op |                              b |                    a |
#+----+--------------------------------+----------------------+
#| +I |                            Hi2 |                    5 |
#| +I |                            Hi2 |               <NULL> |
#| +I |                             Hi |                    7 |
#| +I |                             Hi |                    3 |
#+----+--------------------------------+----------------------+