Write Records to Streams

This document describes how to write data to streams in HStreamDB using hstreamdb-java or clients implemented in other languages.

To write data to HStreamDB, you need to pack messages into HStream Records and use a producer to create and send them to servers.

HStream Record

All data in streams is in the form of an HStream Record. There are two kinds of HStream Record:

  • HRecord: You can think of an hrecord as a piece of JSON data, similar to a document in some NoSQL databases.
  • Raw Record: Arbitrary binary data.
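
Both kinds are wrapped in a Record before being written. For example, with the Java client (the same builder API used in the examples below):

```java
import io.hstream.HRecord;
import io.hstream.Record;
import java.nio.charset.StandardCharsets;

// Structured data: build an HRecord and wrap it in a Record
HRecord hRecord = HRecord.newBuilder().put("id", 10).put("isReady", true).build();
Record structured = Record.newBuilder().hRecord(hRecord).build();

// Arbitrary binary data: wrap raw bytes in a Record
Record raw =
    Record.newBuilder().rawRecord("some-bytes".getBytes(StandardCharsets.UTF_8)).build();
```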

End-to-End Compression

To reduce transfer overhead and maximize bandwidth utilization, HStreamDB supports the compression of written HStream records. Users can set the compression algorithm when creating a BufferedProducer. Currently, HStreamDB supports both gzip and zstd compression algorithms. Compressed records are automatically decompressed by the client when they are consumed from HStreamDB.
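
As a minimal sketch with the Java client (this assumes hstreamdb-java's CompressionType enum and the compressionType option on the BufferedProducer builder):

```java
import io.hstream.*;

HStreamClient client = HStreamClient.builder().serviceUrl("127.0.0.1:6570").build();
// Pick the compression algorithm when creating the BufferedProducer;
// consumers decompress the records transparently on read.
BufferedProducer producer =
    client.newBufferedProducer()
        .stream("your_stream_name")
        .compressionType(CompressionType.GZIP) // or CompressionType.ZSTD
        .build();
```

The Go client exposes the same choice through hstream.WithCompression, as shown in the multi-key example at the end of this page.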

Write HStream Records

There are two ways to write records to servers. For simplicity, you can start with the Producer from client.newProducer(). The Producer provides no configuration options; it sends records to servers as soon as possible, and all records are sent in parallel, which means they are unordered. In practice, the BufferedProducer from client.newBufferedProducer() is almost always the better choice. A BufferedProducer buffers records in order into batches and sends each batch to the servers. When a record is written to the stream, the HStream server generates a corresponding record id for it and sends it back to the client. The record id is unique within the stream.

Write Records Using a Producer

```java
// WriteDataSimpleExample.java
package docs.code.examples;

import io.hstream.*;
import io.hstream.Record;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;

public class WriteDataSimpleExample {
  public static void main(String[] args) throws Exception {
    // TODO (developers): Replace these variables for your own use cases.
    String serviceUrl = "127.0.0.1:6570";
    if (System.getenv("serviceUrl") != null) {
      serviceUrl = System.getenv("serviceUrl");
    }
    String streamName1 = "your_h_records_stream_name";
    String streamName2 = "your_raw_records_stream_name";
    // We do not recommend writing both raw data and HRecord data into the same stream.
    HStreamClient client = HStreamClient.builder().serviceUrl(serviceUrl).build();
    writeHRecordData(client, streamName1);
    writeRawData(client, streamName2);
    client.close();
  }

  public static void writeHRecordData(HStreamClient client, String streamName) {
    // Create a basic producer for low latency scenarios
    // For high throughput scenarios, please see the next section about BufferedProducer
    Producer producer = client.newProducer().stream(streamName).build();
    HRecord hRecord =
        HRecord.newBuilder()
            // Number
            .put("id", 10)
            // Boolean
            .put("isReady", true)
            // List
            .put("targets", HArray.newBuilder().add(1).add(2).add(3).build())
            // String
            .put("name", "hRecord-example")
            .build();
    for (int i = 0; i <= 3000; i++) {
      Record record = Record.newBuilder().hRecord(hRecord).build();
      // If the data is written successfully, the future completes with a
      // server-assigned record id
      CompletableFuture<String> recordId = producer.write(record);
      System.out.println("Wrote message ID: " + recordId.join());
    }
  }

  private static void writeRawData(HStreamClient client, String streamName) {
    Producer producer = client.newProducer().stream(streamName).build();
    List<String> messages = Arrays.asList("first", "second");
    for (final String message : messages) {
      Record record =
          Record.newBuilder().rawRecord(message.getBytes(StandardCharsets.UTF_8)).build();
      CompletableFuture<String> recordId = producer.write(record);
      System.out.println("Published message ID: " + recordId.join());
    }
  }
}
```

```go
// ExampleWriteProducer.go
package examples

import (
	"github.com/hstreamdb/hstreamdb-go/hstream"
	"github.com/hstreamdb/hstreamdb-go/hstream/Record"
	"log"
)

func ExampleWriteProducer() error {
	client, err := hstream.NewHStreamClient(YourHStreamServiceUrl)
	if err != nil {
		log.Fatalf("Creating client error: %s", err)
	}
	defer client.Close()

	producer, err := client.NewProducer("testStream")
	if err != nil {
		log.Fatalf("Creating producer error: %s", err)
	}
	defer producer.Stop()

	payload := []byte("Hello HStreamDB")
	rawRecord, err := Record.NewHStreamRawRecord("testStream", payload)
	if err != nil {
		log.Fatalf("Creating raw record error: %s", err)
	}
	for i := 0; i < 100; i++ {
		appendRes := producer.Append(rawRecord)
		if resp, err := appendRes.Ready(); err != nil {
			log.Printf("Append error: %s", err)
		} else {
			log.Printf("Append response: %s", resp)
		}
	}
	return nil
}
```

```python
# https://github.com/hstreamdb/hstreamdb-py/blob/main/examples/snippets/guides.py
import asyncio
import hstreamdb
import os

# NOTE: Replace with your own host and port
host = os.getenv("GUIDE_HOST", "127.0.0.1")
port = os.getenv("GUIDE_PORT", 6570)
stream_name = "your_stream"
subscription = "your_subscription"


# Run: asyncio.run(main(your_async_function))
async def main(*funcs):
    async with await hstreamdb.insecure_client(host=host, port=port) as client:
        for f in funcs:
            await f(client)


async def append_records(client):
    payloads = [b"some_raw_binary_bytes", {"msg": "hi"}]
    rs = await client.append(stream_name, payloads)
    for r in rs:
        print("Append done, ", r)
```

Write Records Using a Buffered Producer

In almost all scenarios, we recommend using BufferedProducer whenever possible because it offers higher throughput and a very flexible configuration that lets you trade off between throughput and latency as needed. Two settings of BufferedProducer control the flush triggers and the buffer size. With BatchSetting, you determine when a batch is sent based on the maximum record count, the maximum byte size, and the maximum age of the batch. With FlowControlSetting, you limit the total bytes the producer may buffer. The following code example shows how to use BatchSetting to set the corresponding triggers that tell the producer when to flush, and FlowControlSetting to limit the maximum bytes held in a BufferedProducer.

```java
// WriteDataBufferedExample.java
package docs.code.examples;

import io.hstream.*;
import io.hstream.Record;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

public class WriteDataBufferedExample {
  public static void main(String[] args) throws Exception {
    // TODO (developers): Replace these variables for your own use cases.
    String serviceUrl = "127.0.0.1:6570";
    if (System.getenv("serviceUrl") != null) {
      serviceUrl = System.getenv("serviceUrl");
    }
    String streamName = "your_h_records_stream_name";
    HStreamClient client = HStreamClient.builder().serviceUrl(serviceUrl).build();
    writeHRecordDataWithBufferedProducers(client, streamName);
    client.close();
  }

  public static void writeHRecordDataWithBufferedProducers(
      HStreamClient client, String streamName) {
    BatchSetting batchSetting =
        BatchSetting.newBuilder()
            // optional, default: 100, the max record count of a batch,
            // disable the trigger if the value <= 0.
            .recordCountLimit(100)
            // optional, default: 4096(4KB), the max byte size of a batch,
            // disable the trigger if the value <= 0.
            .bytesLimit(4096)
            // optional, default: 100(ms), the max age of a buffering batch,
            // disable the trigger if the value <= 0.
            .ageLimit(100)
            .build();
    // FlowControlSetting limits the total amount of buffered data,
    // including buffered batch records and records being sent
    FlowControlSetting flowControlSetting =
        FlowControlSetting.newBuilder()
            // Optional, default: 104857600(100MB), total bytes limit, including
            // buffered batch records and records being sent;
            // the value must be greater than batchSetting.bytesLimit
            .bytesLimit(40960)
            .build();
    BufferedProducer producer =
        client.newBufferedProducer().stream(streamName)
            .batchSetting(batchSetting)
            .flowControlSetting(flowControlSetting)
            .build();
    List<CompletableFuture<String>> recordIds = new ArrayList<>();
    Random random = new Random();
    for (int i = 0; i < 100; i++) {
      double temp = random.nextInt(100) / 10.0 + 15;
      HRecord hRecord = HRecord.newBuilder().put("temperature", temp).build();
      Record record = Record.newBuilder().hRecord(hRecord).build();
      CompletableFuture<String> recordId = producer.write(record);
      recordIds.add(recordId);
    }
    // Closing the producer flushes any buffered records first
    producer.close();
    System.out.println(
        "Wrote message IDs: "
            + recordIds.stream().map(CompletableFuture::join).collect(Collectors.toList()));
  }
}
```

```go
// ExampleWriteBatchProducer.go
package examples

import (
	"github.com/hstreamdb/hstreamdb-go/hstream"
	"github.com/hstreamdb/hstreamdb-go/hstream/Record"
	"log"
)

func ExampleWriteBatchProducer() error {
	client, err := hstream.NewHStreamClient(YourHStreamServiceUrl)
	if err != nil {
		log.Fatalf("Creating client error: %s", err)
	}
	defer client.Close()

	producer, err := client.NewBatchProducer("testDefaultStream", hstream.WithBatch(10, 500))
	if err != nil {
		log.Fatalf("Creating producer error: %s", err)
	}
	defer producer.Stop()

	result := make([]hstream.AppendResult, 0, 100)
	for i := 0; i < 100; i++ {
		hRecord, _ := Record.NewHStreamHRecord("", map[string]interface{}{
			"id":      i,
			"isReady": true,
			"name":    "hRecord-example",
		})
		r := producer.Append(hRecord)
		result = append(result, r)
	}

	for i, res := range result {
		resp, err := res.Ready()
		if err != nil {
			log.Printf("write error: %s\n", err.Error())
			continue
		}
		log.Printf("record[%d]=%s\n", i, resp.String())
	}
	return nil
}
```

```python
# https://github.com/hstreamdb/hstreamdb-py/blob/main/examples/snippets/guides.py
import asyncio
import hstreamdb
import os

# NOTE: Replace with your own host and port
host = os.getenv("GUIDE_HOST", "127.0.0.1")
port = os.getenv("GUIDE_PORT", 6570)
stream_name = "your_stream"
subscription = "your_subscription"


# Run: asyncio.run(main(your_async_function))
async def main(*funcs):
    async with await hstreamdb.insecure_client(host=host, port=port) as client:
        for f in funcs:
            await f(client)


class AppendCallback(hstreamdb.BufferedProducer.AppendCallback):
    count = 0

    def on_success(self, stream_name, payloads, stream_keyid):
        self.count += 1
        print(f"Batch {self.count}: Append success with {len(payloads)} payloads.")

    def on_fail(self, stream_name, payloads, stream_keyid, e):
        print("Append failed!")
        print(e)


async def buffered_append_records(client):
    p = client.new_producer(
        append_callback=AppendCallback(),
        size_trigger=10240,
        time_trigger=0.5,
        retry_count=2,
    )
    for _ in range(50):
        await p.append(stream_name, b"some_raw_binary_bytes")
        await p.append(stream_name, {"msg": "hello"})
    await p.wait_and_close()
```

Write Records with Partition Keys

Partition keys are optional. If no key is given, the server automatically assigns a default key. Records with the same partition key are guaranteed to be written in order by a BufferedProducer.

Partition keys also drive sharding, another important feature of HStreamDB: the key decides which shard a record is allocated to, which improves write/read performance. See Manage Shards of a Stream for a more detailed explanation.

You can easily write records with keys using the following examples:

```java
// WriteDataWithKeyExample.java
package docs.code.examples;

import io.hstream.*;
import io.hstream.Record;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

public class WriteDataWithKeyExample {
  public static void main(String[] args) throws Exception {
    // TODO (developers): Replace these variables for your own use cases.
    String serviceUrl = "127.0.0.1:6570";
    if (System.getenv("serviceUrl") != null) {
      serviceUrl = System.getenv("serviceUrl");
    }
    String streamName = "your_h_records_stream_name";
    HStreamClient client = HStreamClient.builder().serviceUrl(serviceUrl).build();
    writeHRecordDataWithKey(client, streamName);
    client.close();
  }

  public static void writeHRecordDataWithKey(HStreamClient client, String streamName) {
    // For demonstration, we use the following as partition keys for the records.
    // As mentioned above, if we do not give any partition key, the record will
    // get a default key and be mapped to some default shard.
    String key1 = "South";
    String key2 = "North";
    // Create a buffered producer with default BatchSetting and FlowControlSetting.
    BufferedProducer producer = client.newBufferedProducer().stream(streamName).build();
    List<CompletableFuture<String>> recordIds = new ArrayList<>();
    Random random = new Random();
    for (int i = 0; i < 100; i++) {
      double temp = random.nextInt(100) / 10.0 + 15;
      Record record;
      if ((i % 3) == 0) {
        HRecord hRecord = HRecord.newBuilder().put("temperature", temp).put("withKey", 1).build();
        record = Record.newBuilder().hRecord(hRecord).partitionKey(key1).build();
      } else {
        HRecord hRecord = HRecord.newBuilder().put("temperature", temp).put("withKey", 2).build();
        record = Record.newBuilder().hRecord(hRecord).partitionKey(key2).build();
      }
      CompletableFuture<String> recordId = producer.write(record);
      recordIds.add(recordId);
    }
    System.out.println(
        "Wrote message IDs: "
            + recordIds.stream().map(CompletableFuture::join).collect(Collectors.toList()));
    producer.close();
  }
}
```

```go
// ExampleWriteBatchProducerMultiKey.go
package examples

import (
	"fmt"
	"github.com/hstreamdb/hstreamdb-go/hstream"
	"github.com/hstreamdb/hstreamdb-go/hstream/Record"
	"github.com/hstreamdb/hstreamdb-go/hstream/compression"
	"log"
	"math/rand"
	"sync"
)

func ExampleWriteBatchProducerMultiKey() error {
	client, err := hstream.NewHStreamClient(YourHStreamServiceUrl)
	if err != nil {
		log.Fatalf("Creating client error: %s", err)
	}
	defer client.Close()

	producer, err := client.NewBatchProducer("testStream",
		// optional: set record count and max batch bytes trigger
		hstream.WithBatch(10, 500),
		// optional: set timeout trigger
		hstream.TimeOut(1000),
		// optional: set client compression
		hstream.WithCompression(compression.Zstd),
		// optional: set flow control
		hstream.WithFlowControl(81920000))
	if err != nil {
		log.Fatalf("Creating producer error: %s", err)
	}
	defer producer.Stop()

	keys := []string{"sensor1", "sensor2", "sensor3", "sensor4", "sensor5"}
	rids := sync.Map{}
	wg := sync.WaitGroup{}
	wg.Add(5)
	for _, key := range keys {
		go func(key string) {
			result := make([]hstream.AppendResult, 0, 100)
			for i := 0; i < 100; i++ {
				temp := rand.Intn(100)/10.0 + 15
				rawRecord, _ := Record.NewHStreamHRecord(key, map[string]interface{}{
					key: fmt.Sprintf("temperature=%d", temp),
				})
				r := producer.Append(rawRecord)
				result = append(result, r)
			}
			rids.Store(key, result)
			wg.Done()
		}(key)
	}
	wg.Wait()

	rids.Range(func(key, value interface{}) bool {
		k := key.(string)
		res := value.([]hstream.AppendResult)
		for i := 0; i < 100; i++ {
			resp, err := res[i].Ready()
			if err != nil {
				log.Printf("write error: %s\n", err.Error())
				continue
			}
			log.Printf("[key: %s]: record[%d]=%s\n", k, i, resp.String())
		}
		return true
	})
	return nil
}
```