From e91f1206075860c3db7fea22aaf1a57aea4b13f6 Mon Sep 17 00:00:00 2001
From: ysc <2725843507@qq.com>
Date: Wed, 23 Apr 2025 11:16:08 +0800
Subject: [PATCH 1/4] Add collector-distribution packaging module & Migrate
IoTDBDataRegionSyncConnector & Add scheduled triggering for batch
accumulation operations & Add subscribe source demo
---
.github/workflows/code-analysis.yml | 2 +-
iotdb-collector/collector-core/pom.xml | 146 +++++-
.../collector-core/src/assembly/core.xml | 40 ++
.../resources/conf/application.properties | 154 ++++++
.../src/assembly/resources/conf/logback.xml | 126 +++++
.../assembly/resources/sbin/collector-env.sh | 94 ++++
.../src/assembly/resources/sbin/common.sh | 181 +++++++
.../resources/sbin/start-collector.bat | 208 ++++++++
.../resources/sbin/start-collector.sh | 237 +++++++++
.../resources/sbin/stop-collector.bat | 67 +++
.../assembly/resources/sbin/stop-collector.sh | 86 ++++
.../apache/iotdb/collector/Application.java | 2 +
.../iotdb/collector/config/Configuration.java | 45 +-
.../iotdb/collector/config/Options.java | 13 +
.../collector/config/PipeRuntimeOptions.java | 283 +++++++++++
.../config/PluginRuntimeOptions.java | 16 +-
.../collector/config/TaskRuntimeOptions.java | 16 +-
.../collector/persistence/DBConstant.java | 13 +-
.../collector/persistence/Persistence.java | 1 +
.../persistence/PluginPersistence.java | 6 +-
.../persistence/TaskPersistence.java | 11 +-
.../collector/plugin/api/PullSource.java | 12 +-
.../collector/plugin/api/PushSource.java | 10 +
.../CollectorRuntimeEnvironment.java | 5 +
.../plugin/api/event/PeriodicalEvent.java | 24 +
.../plugin/builtin/BuiltinPlugin.java | 8 +-
.../processor/SubscriptionProcessor.java | 56 +++
.../sink/client/IoTDBClientManager.java | 114 +++++
...IoTDBDataNodeCacheLeaderClientManager.java | 79 +++
.../IoTDBDataNodeSyncClientManager.java | 130 +++++
.../builtin/sink/client/IoTDBSyncClient.java | 180 +++++++
.../sink/client/IoTDBSyncClientManager.java | 373 ++++++++++++++
.../builtin/sink/client/ThriftClient.java | 124 +++++
.../sink/client/ThriftClientProperty.java | 123 +++++
.../sink/compressor/PipeCompressor.java | 76 +++
.../sink/compressor/PipeCompressorConfig.java | 39 ++
.../compressor/PipeCompressorFactory.java | 116 +++++
.../sink/compressor/PipeGZIPCompressor.java | 54 ++
.../sink/compressor/PipeLZ4Compressor.java | 54 ++
.../sink/compressor/PipeLZMA2Compressor.java | 54 ++
.../sink/compressor/PipeSnappyCompressor.java | 54 ++
.../sink/compressor/PipeZSTDCompressor.java | 49 ++
.../sink/constant/ColumnHeaderConstant.java | 35 ++
.../sink/constant/PipeConnectorConstant.java | 261 ++++++++++
.../PipeTransferHandshakeConstant.java | 36 ++
.../sink/event/PipeInsertionEvent.java | 40 ++
.../event/PipeRawTabletInsertionEvent.java | 81 +++
.../sink/event/PipeTsFileInsertionEvent.java | 58 +++
...PipeRuntimeConnectorCriticalException.java | 103 ++++
.../PipeRuntimeCriticalException.java | 96 ++++
.../sink/exception/PipeRuntimeException.java | 54 ++
.../exception/PipeRuntimeExceptionType.java | 89 ++++
.../exception/PipeRuntimeMetaVersion.java | 77 +++
.../PipeRuntimeNonCriticalException.java | 101 ++++
...peRuntimeOutOfMemoryCriticalException.java | 104 ++++
.../builtin/sink/exception/PipeStatus.java | 50 ++
.../evolvable/batch/PipeTabletEventBatch.java | 114 +++++
.../batch/PipeTabletEventPlainBatch.java | 119 +++++
.../batch/PipeTabletEventTsFileBatch.java | 170 +++++++
.../batch/PipeTransferBatchReqBuilder.java | 179 +++++++
.../request/IoTDBConnectorRequestVersion.java | 36 ++
.../thrift/request/PipeRequestType.java | 90 ++++
.../request/PipeTransferCompressedReq.java | 150 ++++++
.../PipeTransferDataNodeHandshakeV1Req.java | 69 +++
.../PipeTransferDataNodeHandshakeV2Req.java | 69 +++
.../request/PipeTransferFilePieceReq.java | 117 +++++
.../request/PipeTransferFileSealReqV1.java | 107 ++++
.../request/PipeTransferFileSealReqV2.java | 163 ++++++
.../request/PipeTransferHandshakeV1Req.java | 100 ++++
.../request/PipeTransferHandshakeV2Req.java | 116 +++++
.../PipeTransferSchemaSnapshotPieceReq.java | 71 +++
.../PipeTransferSchemaSnapshotSealReq.java | 166 ++++++
.../thrift/request/PipeTransferSliceReq.java | 147 ++++++
.../request/PipeTransferTabletBatchReqV2.java | 148 ++++++
.../request/PipeTransferTabletBinaryReq.java | 98 ++++
.../PipeTransferTabletBinaryReqV2.java | 126 +++++
.../request/PipeTransferTabletRawReq.java | 126 +++++
.../request/PipeTransferTabletRawReqV2.java | 121 +++++
.../request/PipeTransferTsFilePieceReq.java | 70 +++
.../PipeTransferTsFilePieceWithModReq.java | 71 +++
.../PipeTransferTsFileSealWithModReq.java | 142 ++++++
.../response/PipeTransferFilePieceResp.java | 74 +++
.../builtin/sink/protocol/IoTDBConnector.java | 472 ++++++++++++++++++
.../IoTDBDataRegionSyncConnector.java | 365 ++++++++++++++
.../sink/protocol/IoTDBSslSyncConnector.java | 229 +++++++++
.../protocol/PipeReceiverStatusHandler.java | 199 ++++++++
.../sink/protocol/session/IClientSession.java | 75 +++
.../plugin/builtin/source/HttpPullSource.java | 6 -
.../plugin/builtin/source/HttpPushSource.java | 5 -
.../builtin/source/IoTDBPushSource.java | 129 +++++
.../constant/IoTDBPushSourceConstant.java | 34 ++
.../builtin/source/event/SubDemoEvent.java | 44 ++
.../event/common/PipeBinaryTransformer.java | 37 ++
.../event/common/PipeDataTypeTransformer.java | 74 +++
.../event/common/PipeResetTabletRow.java | 54 ++
.../builtin/source/event/common/PipeRow.java | 211 ++++++++
.../source/event/common/PipeRowCollector.java | 103 ++++
.../runtime/plugin/PluginRuntime.java | 2 +-
.../constructor/ProcessorConstructor.java | 3 +
.../plugin/constructor/SinkConstructor.java | 7 +-
.../plugin/constructor/SourceConstructor.java | 2 +
.../collector/runtime/task/TaskRuntime.java | 2 +
.../runtime/task/processor/ProcessorTask.java | 23 +-
.../collector/runtime/task/sink/SinkTask.java | 9 +-
.../task/source/pull/PullSourceTask.java | 9 +-
.../service/PeriodicalJobService.java | 129 +++++
.../iotdb/collector/utils/PathUtil.java | 22 +
.../collector/utils/PipeMemoryWeightUtil.java | 304 +++++++++++
.../plugin => }/utils/PluginFileUtils.java | 2 +-
.../builder/PipeTableModeTsFileBuilder.java | 273 ++++++++++
.../builder/PipeTreeModelTsFileBuilder.java | 268 ++++++++++
.../utils/builder/PipeTsFileBuilder.java | 112 +++++
.../utils/cacher/LeaderCacheUtils.java | 69 +++
.../preiodical/ScheduledExecutorUtil.java | 195 ++++++++
.../utils/preiodical/WrappedRunnable.java | 47 ++
.../PipeTableModelTabletEventSorter.java | 273 ++++++++++
.../utils/sorter/PipeTabletEventSorter.java | 105 ++++
.../PipeTreeModelTabletEventSorter.java | 123 +++++
.../src/main/resources/application.properties | 70 ---
.../src/main/resources/logback.xml | 44 --
.../collector-distribution/pom.xml | 93 ++++
.../src/assembly/collector-core.xml | 77 +++
iotdb-collector/pom.xml | 1 +
pom.xml | 116 ++++-
124 files changed, 11859 insertions(+), 183 deletions(-)
create mode 100644 iotdb-collector/collector-core/src/assembly/core.xml
create mode 100644 iotdb-collector/collector-core/src/assembly/resources/conf/application.properties
create mode 100644 iotdb-collector/collector-core/src/assembly/resources/conf/logback.xml
create mode 100644 iotdb-collector/collector-core/src/assembly/resources/sbin/collector-env.sh
create mode 100644 iotdb-collector/collector-core/src/assembly/resources/sbin/common.sh
create mode 100644 iotdb-collector/collector-core/src/assembly/resources/sbin/start-collector.bat
create mode 100644 iotdb-collector/collector-core/src/assembly/resources/sbin/start-collector.sh
create mode 100644 iotdb-collector/collector-core/src/assembly/resources/sbin/stop-collector.bat
create mode 100644 iotdb-collector/collector-core/src/assembly/resources/sbin/stop-collector.sh
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/PipeRuntimeOptions.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/api/event/PeriodicalEvent.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/processor/SubscriptionProcessor.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBClientManager.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBDataNodeCacheLeaderClientManager.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBDataNodeSyncClientManager.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBSyncClient.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBSyncClientManager.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/ThriftClient.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/ThriftClientProperty.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeCompressor.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeCompressorConfig.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeCompressorFactory.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeGZIPCompressor.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeLZ4Compressor.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeLZMA2Compressor.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeSnappyCompressor.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeZSTDCompressor.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/constant/ColumnHeaderConstant.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/constant/PipeConnectorConstant.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/constant/PipeTransferHandshakeConstant.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/event/PipeInsertionEvent.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/event/PipeRawTabletInsertionEvent.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/event/PipeTsFileInsertionEvent.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeConnectorCriticalException.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeCriticalException.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeException.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeExceptionType.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeMetaVersion.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeNonCriticalException.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeOutOfMemoryCriticalException.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeStatus.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/evolvable/batch/PipeTabletEventBatch.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/evolvable/batch/PipeTabletEventPlainBatch.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/evolvable/batch/PipeTabletEventTsFileBatch.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/evolvable/batch/PipeTransferBatchReqBuilder.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/IoTDBConnectorRequestVersion.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeRequestType.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferCompressedReq.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferDataNodeHandshakeV1Req.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferDataNodeHandshakeV2Req.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferFilePieceReq.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferFileSealReqV1.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferFileSealReqV2.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferHandshakeV1Req.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferHandshakeV2Req.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferSchemaSnapshotPieceReq.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferSchemaSnapshotSealReq.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferSliceReq.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletBatchReqV2.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletBinaryReq.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletBinaryReqV2.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletRawReq.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletRawReqV2.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTsFilePieceReq.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTsFilePieceWithModReq.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTsFileSealWithModReq.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/response/PipeTransferFilePieceResp.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/IoTDBConnector.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/IoTDBDataRegionSyncConnector.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/IoTDBSslSyncConnector.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/PipeReceiverStatusHandler.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/session/IClientSession.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/IoTDBPushSource.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/constant/IoTDBPushSourceConstant.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/SubDemoEvent.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeBinaryTransformer.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeDataTypeTransformer.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeResetTabletRow.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeRow.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeRowCollector.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/service/PeriodicalJobService.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/PathUtil.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/PipeMemoryWeightUtil.java
rename iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/{runtime/plugin => }/utils/PluginFileUtils.java (98%)
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/builder/PipeTableModeTsFileBuilder.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/builder/PipeTreeModelTsFileBuilder.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/builder/PipeTsFileBuilder.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/cacher/LeaderCacheUtils.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/preiodical/ScheduledExecutorUtil.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/preiodical/WrappedRunnable.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/sorter/PipeTableModelTabletEventSorter.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/sorter/PipeTabletEventSorter.java
create mode 100644 iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/sorter/PipeTreeModelTabletEventSorter.java
delete mode 100644 iotdb-collector/collector-core/src/main/resources/application.properties
delete mode 100644 iotdb-collector/collector-core/src/main/resources/logback.xml
create mode 100644 iotdb-collector/collector-distribution/pom.xml
create mode 100644 iotdb-collector/collector-distribution/src/assembly/collector-core.xml
diff --git a/.github/workflows/code-analysis.yml b/.github/workflows/code-analysis.yml
index b8f15bc..f890970 100644
--- a/.github/workflows/code-analysis.yml
+++ b/.github/workflows/code-analysis.yml
@@ -34,7 +34,7 @@ jobs:
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
- queries: +security-extended
+ queries: +security-and-quality
- name: Auto Build
uses: github/codeql-action/autobuild@v3
diff --git a/iotdb-collector/collector-core/pom.xml b/iotdb-collector/collector-core/pom.xml
index c6517b8..9e9c945 100644
--- a/iotdb-collector/collector-core/pom.xml
+++ b/iotdb-collector/collector-core/pom.xml
@@ -50,17 +50,22 @@
org.apache.iotdb
- node-commons
- 1.3.2
+ service-rpc
+ 2.0.2-SNAPSHOTorg.apache.iotdb
- service-rpc
- 1.3.2
+ pipe-api
+ 2.0.2-SNAPSHOT
+
+
+ org.apache.tsfile
+ tsfile
+ 2.1.0-250325-SNAPSHOTorg.apache.iotdb
- pipe-api
+ tsfile1.3.2
@@ -103,16 +108,113 @@
com.lmaxdisruptor
+
+ org.java-websocket
+ Java-WebSocket
+
+
+ org.apache.commons
+ commons-lang3
+
+
+ org.apache.commons
+ commons-collections4
+
+
+ org.apache.commons
+ commons-pool2
+
+
+ org.apache.iotdb
+ iotdb-thrift-commons
+ 2.0.2-SNAPSHOT
+
+
+ org.apache.iotdb
+ iotdb-thrift
+ 2.0.2-SNAPSHOT
+
+
+ org.apache.iotdb
+ iotdb-session
+ 2.0.2-SNAPSHOT
+
+
+ net.java.dev.jna
+ jna
+
+
+ net.java.dev.jna
+ jna-platform
+
+
+ org.apache.thrift
+ libthrift
+
+
+ com.google.guava
+ guava
+
+
+ org.eclipse.milo
+ stack-core
+
+
+ org.eclipse.milo
+ sdk-core
+
+
+ org.eclipse.milo
+ stack-server
+
+
+ org.eclipse.milo
+ sdk-server
+
+
+ org.apache.tsfile
+ common
+
+
+ io.moquette
+ moquette-broker
+ com.google.code.findbugsjsr305
+
+ ch.qos.logback
+ logback-classic
+ org.xerialsqlite-jdbc
+
+ commons-io
+ commons-io
+
+
+ com.github.luben
+ zstd-jni
+
+
+ com.github.ben-manes.caffeine
+ caffeine
+
+
+
+ ${project.basedir}/src/assembly/resources/conf
+
+ application.properties
+ logback.xml
+
+ false
+
+ org.apache.maven.plugins
@@ -126,6 +228,40 @@
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+
+ 8
+ 8
+
+
+
+ org.apache.maven.plugins
+ maven-assembly-plugin
+
+
+
+ server-assembly
+
+ single
+
+ package
+
+
+ src/assembly/core.xml
+
+ false
+
+
+ true
+ true
+
+
+
+
+
+
diff --git a/iotdb-collector/collector-core/src/assembly/core.xml b/iotdb-collector/collector-core/src/assembly/core.xml
new file mode 100644
index 0000000..4db5225
--- /dev/null
+++ b/iotdb-collector/collector-core/src/assembly/core.xml
@@ -0,0 +1,40 @@
+
+
+
+ server
+
+ dir
+ zip
+
+ false
+
+
+ lib
+
+
+
+
+ src/assembly/resources
+ ${file.separator}
+
+
+
diff --git a/iotdb-collector/collector-core/src/assembly/resources/conf/application.properties b/iotdb-collector/collector-core/src/assembly/resources/conf/application.properties
new file mode 100644
index 0000000..7cfcea4
--- /dev/null
+++ b/iotdb-collector/collector-core/src/assembly/resources/conf/application.properties
@@ -0,0 +1,154 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+####################
+### API Service Configuration
+####################
+
+# The port on which the Jetty service runs.
+# Effective mode: on every start
+# Data type: int
+api_service_port=17070
+
+####################
+### Task Runtime Configuration
+####################
+
+# The number of concurrent threads for the source task.
+# Effective mode: on every start
+# Data type: int
+task_source_parallelism_num=1
+
+# The number of concurrent threads for the process task.
+# Effective mode: on every start
+# Data type: int
+task_process_parallelism_num=4
+
+# The number of concurrent threads for the sink task.
+# Effective mode: on every start
+# Data type: int
+task_sink_parallelism_num=4
+
+# The ring buffer size for the processor task.
+# Effective mode: on every start
+# Data type: int
+task_processor_ring_buffer_size=1024
+
+# The ring buffer size for the sink task.
+# Effective mode: on every start
+# Data type: int
+task_sink_ring_buffer_size=1024
+
+# Database file location of task
+# Effective mode: on every start
+# Data type: string
+task_database_file_path=system/database/task.db
+
+####################
+### Plugin Configuration
+####################
+
+# The location of plugin jar file
+# Effective mode: on every start
+# Data type: string
+plugin_lib_dir=system/plugin
+
+# Installation location of plugin jar file
+# Effective mode: on every start
+# Data type: string
+plugin_install_lib_dir=system/plugin/install
+
+# Database file location of plugin
+# Effective mode: on every start
+# Data type: string
+plugin_database_file_path=system/database/plugin.db
+
+####################
+### Pipe Configuration
+####################
+
+# The total bytes that all pipe sinks can transfer per second.
+# When given a value less than or equal to 0, it means no limit.
+# default value is -1, which means no limit.
+# Effective mode: on every start
+# Data type: double
+pipe_all_sinks_rate_limit_bytes_per_second=-1
+
+# Rate limiter configuration interval in milliseconds for hot reloading
+# Effective mode: on every start
+# Data type: int
+rate_limiter_hot_reload_check_interval_ms=1000
+
+# Maximum number of retry attempts for operations
+# Effective mode: on every start
+# Data type: int
+max_retry_times=5
+
+# Used for connection of IoTDB native clients
+# Bind with rpc_address
+# Effective mode: on every start
+# Data type: int
+rpc_port=6667
+
+# Used for connection of IoTDB native clients(Session)
+# Could set 127.0.0.1(for local test) or ipv4 address
+# Effective mode: on every start
+# Data type: String
+rpc_address=0.0.0.0
+
+# Buffer size for reading files in pipe connector (8MB default)
+# Effective mode: on every start
+# Data type: int
+pipe_connector_read_file_buffer_size=8388608
+
+# Timeout duration for pipe connector data transfer in milliseconds
+# Effective mode: on every start
+# Data type: int
+pipe_connector_transfer_timeout_ms=900000
+
+# Maximum allowed frame size for Thrift communication
+# Effective mode: on every start
+# Data type: int
+thrift_frame_max_size=536870912
+
+# Enable/disable thrift compression for pipe connector RPC
+# Effective mode: on every start
+# Data type: boolean
+is_pipe_connector_rpc_thrift_compression_enabled=false
+
+# Use this value to set timestamp precision as "ms", "us" or "ns".
+# Once the precision has been set, it can not be changed.
+# Effective mode: on every start
+# Data type: string
+timestamp_precision=ms
+
+# Memory allocation ratio for pipe leader cache management
+# Effective mode: on every start
+# Data type: float
+pipe_leader_cache_memory_usage_percentage=0.1
+
+# Enable/disable reference tracking for pipe events
+# Effective mode: on every start
+# Data type: boolean
+pipe_event_reference_tracking_enabled=true
+
+# Proactively triggers the interval for batch deliveries
+# Effective mode: on every start
+# Data type: long
+executor_cron_heartbeat_event_interval_seconds=20
\ No newline at end of file
diff --git a/iotdb-collector/collector-core/src/assembly/resources/conf/logback.xml b/iotdb-collector/collector-core/src/assembly/resources/conf/logback.xml
new file mode 100644
index 0000000..c3e66ca
--- /dev/null
+++ b/iotdb-collector/collector-core/src/assembly/resources/conf/logback.xml
@@ -0,0 +1,126 @@
+
+
+
+
+
+
+
+
+ %d [%t] %-5p %C{25}:%L - %m %n
+
+
+
+ ${COLLECTOR_HOME}/logs/application_error.log
+
+ ${COLLECTOR_HOME}/logs/log-application-error-%d{yyyyMMdd}.log.gz
+ 30
+
+ true
+
+ %d [%t] %-5p %C{25}:%L - %m %n
+ utf-8
+
+
+ ERROR
+ ACCEPT
+ DENY
+
+
+
+ ${COLLECTOR_HOME}/logs/application_warn.log
+
+ ${COLLECTOR_HOME}/logs/log-application-warn-%d{yyyyMMdd}.log.gz
+ 30
+
+ true
+
+ %d [%t] %-5p %C{25}:%L - %m %n
+ utf-8
+
+
+ WARN
+ ACCEPT
+ DENY
+
+
+
+ ${COLLECTOR_HOME}/logs/application_debug.log
+
+ ${COLLECTOR_HOME}/logs/log-application-debug-%d{yyyyMMdd}.log.gz
+ 30
+
+ true
+
+ %d [%t] %-5p %C{25}:%L - %m %n
+ utf-8
+
+
+ DEBUG
+ ACCEPT
+ DENY
+
+
+
+ ${COLLECTOR_HOME}/logs/application_trace.log
+
+ ${COLLECTOR_HOME}/logs/log-application-trace-%d{yyyyMMdd}.log.gz
+ 30
+
+ true
+
+ %d [%t] %-5p %C{25}:%L - %m %n
+ utf-8
+
+
+ TRACE
+ ACCEPT
+ DENY
+
+
+
+ ${COLLECTOR_HOME}/logs/application_all.log
+
+ ${COLLECTOR_HOME}/logs/log-application-all-%d{yyyyMMdd}.log.gz
+ 30
+
+ true
+
+ %d [%t] %-5p %C{25}:%L - %m %n
+ utf-8
+
+
+ INFO
+ ACCEPT
+ DENY
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/iotdb-collector/collector-core/src/assembly/resources/sbin/collector-env.sh b/iotdb-collector/collector-core/src/assembly/resources/sbin/collector-env.sh
new file mode 100644
index 0000000..2e265c3
--- /dev/null
+++ b/iotdb-collector/collector-core/src/assembly/resources/sbin/collector-env.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# find java in JAVA_HOME
+if [ -n "$JAVA_HOME" ]; then
+ for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do
+ if [ -x "$java" ]; then
+ JAVA="$java"
+ break
+ fi
+ done
+else
+ JAVA=java
+fi
+
+if [ -z "$JAVA" ] ; then
+ echo Unable to find java executable. Check JAVA_HOME and PATH environment variables. > /dev/stderr
+ exit 1;
+fi
+
+# Determine the sort of JVM we'll be running on.
+java_ver_output=`"$JAVA" -version 2>&1`
+jvmver=`echo "$java_ver_output" | grep '[openjdk|java] version' | awk -F'"' 'NR==1 {print $2}' | cut -d\- -f1`
+JVM_VERSION=${jvmver%_*}
+JVM_PATCH_VERSION=${jvmver#*_}
+if [ "$JVM_VERSION" \< "1.8" ] ; then
+ echo "IoTDB requires Java 8u92 or later."
+ exit 1;
+fi
+
+if [ "$JVM_VERSION" = "1.8.0" ] && [ "$JVM_PATCH_VERSION" -lt 92 ] ; then
+ echo "IoTDB requires Java 8u92 or later."
+ exit 1;
+fi
+
+version_arr=(${JVM_VERSION//./ })
+
+illegal_access_params=""
+#GC log path has to be defined here because it needs to access COLLECTOR_HOME
+if [ "${version_arr[0]}" = "1" ] ; then
+ # Java 8
+ MAJOR_VERSION=${version_arr[1]}
+ echo "$COLLECTOR_JMX_OPTS" | grep -q "^-[X]loggc"
+ if [ "$?" = "1" ] ; then # [X] to prevent ccm from replacing this line
+ # only add -Xlog:gc if it's not mentioned in jvm-server.options file
+ mkdir -p ${COLLECTOR_HOME}/logs
+ if [ "$#" -ge "1" -a "$1" == "printgc" ]; then
+ COLLECTOR_JMX_OPTS="$COLLECTOR_JMX_OPTS -Xloggc:${COLLECTOR_HOME}/logs/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10M"
+ # For more detailed GC information, you can uncomment option below.
+ # NOTE: more detailed GC information may bring larger GC log files.
+ # COLLECTOR_JMX_OPTS="$COLLECTOR_JMX_OPTS -Xloggc:${COLLECTOR_HOME}/logs/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+UseGCLogFileRotation -XX:+PrintTenuringDistribution -XX:+PrintHeapAtGC -XX:+PrintReferenceGC -XX:+PrintSafepointStatistics -XX:PrintSafepointStatisticsCount=1 -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
+ fi
+ fi
+else
+ #JDK 11 and others
+ MAJOR_VERSION=${version_arr[0]}
+ # See description of https://bugs.openjdk.java.net/browse/JDK-8046148 for details about the syntax
+ # The following is the equivalent to -XX:+PrintGCDetails -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10M
+ echo "$COLLECTOR_JMX_OPTS" | grep -q "^-[X]log:gc"
+ if [ "$?" = "1" ] ; then # [X] to prevent ccm from replacing this line
+ # only add -Xlog:gc if it's not mentioned in jvm-server.options file
+ mkdir -p ${COLLECTOR_HOME}/logs
+ if [ "$#" -ge "1" -a "$1" == "printgc" ]; then
+ COLLECTOR_JMX_OPTS="$COLLECTOR_JMX_OPTS -Xlog:gc=info,heap*=info,age*=info,safepoint=info,promotion*=info:file=${COLLECTOR_HOME}/logs/gc.log:time,uptime,pid,tid,level:filecount=10,filesize=10485760"
+ # For more detailed GC information, you can uncomment option below.
+ # NOTE: more detailed GC information may bring larger GC log files.
+ # COLLECTOR_JMX_OPTS="$COLLECTOR_JMX_OPTS -Xlog:gc*=debug,heap*=debug,age*=trace,metaspace*=info,safepoint*=debug,promotion*=info:file=${COLLECTOR_HOME}/logs/gc.log:time,uptime,pid,tid,level,tags:filecount=10,filesize=100M"
+ fi
+ fi
+ # Add argLine for Java 11 and above, due to [JEP 396: Strongly Encapsulate JDK Internals by Default] (https://openjdk.java.net/jeps/396)
+ illegal_access_params="$illegal_access_params --add-opens=java.base/java.util.concurrent=ALL-UNNAMED"
+ illegal_access_params="$illegal_access_params --add-opens=java.base/java.lang=ALL-UNNAMED"
+ illegal_access_params="$illegal_access_params --add-opens=java.base/java.util=ALL-UNNAMED"
+ illegal_access_params="$illegal_access_params --add-opens=java.base/java.nio=ALL-UNNAMED"
+ illegal_access_params="$illegal_access_params --add-opens=java.base/java.io=ALL-UNNAMED"
+ illegal_access_params="$illegal_access_params --add-opens=java.base/java.net=ALL-UNNAMED"
+fi
\ No newline at end of file
diff --git a/iotdb-collector/collector-core/src/assembly/resources/sbin/common.sh b/iotdb-collector/collector-core/src/assembly/resources/sbin/common.sh
new file mode 100644
index 0000000..fe56821
--- /dev/null
+++ b/iotdb-collector/collector-core/src/assembly/resources/sbin/common.sh
@@ -0,0 +1,181 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# this function is for parsing the variables like "A=B" in `start-server.sh -D A=B`
+# The command just parse COLLECTOR-prefixed variables and ignore all other variables
+check_env_variables() {
+ string="$1"
+ array=$(echo $string | tr '=' ' ')
+ eval set -- "$array"
+ case "$1" in
+ COLLECTOR_INCLUDE)
+ COLLECTOR_INCLUDE="$2"
+ ;;
+ COLLECTOR_HOME)
+ COLLECTOR_HOME="$2"
+ ;;
+ COLLECTOR_DATA_HOME)
+ COLLECTOR_DATA_HOME="$2"
+ ;;
+ COLLECTOR_CONF)
+ COLLECTOR_CONF="$2"
+ ;;
+ COLLECTOR_LOG_DIR)
+ COLLECTOR_LOG_DIR="$2"
+ ;;
+ COLLECTOR_LOG_CONFIG)
+ COLLECTOR_LOG_CONFIG="$2"
+ ;;
+ COLLECTOR_CLI_CONF)
+ COLLECTOR_CLI_CONF="$2"
+ ;;
+ *)
+ #we can not process it, so that we return back.
+ echo "$1=$2"
+ ;;
+ esac
+ echo ""
+}
+
+check_all_variables() {
+ if [ -z "${COLLECTOR_INCLUDE}" ]; then
+ #do nothing
+ :
+ elif [ -r "$COLLECTOR_INCLUDE" ]; then
+ . "$COLLECTOR_INCLUDE"
+ fi
+
+ if [ -z "${COLLECTOR_HOME}" ]; then
+ export COLLECTOR_HOME="`dirname "$0"`/.."
+ fi
+
+ if [ -z "${COLLECTOR_DATA_HOME}" ]; then
+ export COLLECTOR_DATA_HOME=${COLLECTOR_HOME}
+ fi
+
+ if [ -z "${COLLECTOR_CONF}" ]; then
+ export COLLECTOR_CONF=${COLLECTOR_HOME}/conf
+ fi
+
+ if [ -z "${COLLECTOR_LOG_DIR}" ]; then
+ export COLLECTOR_LOG_DIR=${COLLECTOR_HOME}/logs
+ fi
+
+ if [ -z "${COLLECTOR_LOG_CONFIG}" ]; then
+ export COLLECTOR_LOG_CONFIG="${COLLECTOR_CONF}/logback.xml"
+ fi
+}
+
+check_config_unique() {
+ local key=$1
+ local values=$2
+
+ line_count=$(echo "$values" | wc -l)
+
+ if [ "$line_count" -gt 1 ]; then
+ echo "Error: Duplicate $key entries found"
+ exit 1
+ fi
+}
+
+check_collector_port_usages() {
+ echo "Checking whether the ports are already occupied..."
+ if [ "$(id -u)" -ne 0 ]; then
+ echo "Warning: If you do not use sudo, the checking may not detect all the occupied ports."
+ fi
+ occupied=false
+ if [ -f "$COLLECTOR_CONF/application.properties" ]; then
+ api_service_port=$(sed '/^api_service_port=/!d;s/.*=//' "${COLLECTOR_CONF}"/application.properties | tr -d '\r')
+ elif [ -f "$COLLECTOR_HOME/conf/application.properties" ]; then
+ api_service_port=$(sed '/^api_service_port=/!d;s/.*=//' "${COLLECTOR_HOME}"/conf/application.properties | tr -d '\r')
+  # NOTE(review): two further elif branches here re-tested the exact same
+  # $COLLECTOR_CONF and $COLLECTOR_HOME paths and ran the identical sed
+  # command; they could never match once the branches above had failed,
+  # so they were removed (behavior unchanged).
+ else
+ echo "Warning: cannot find application.properties, check the default configuration"
+ fi
+
+ check_config_unique "api_service_port" "$api_service_port"
+
+ api_service_port=${api_service_port:-17070}
+ if type lsof >/dev/null 2>&1; then
+ PID=$(lsof -t -i:"${api_service_port}" -sTCP:LISTEN)
+ if [ -n "$PID" ]; then
+ echo "The api_service_port" "$api_service_port" "is already occupied, PID:" "$PID"
+ occupied=true
+ fi
+ elif type netstat >/dev/null 2>&1; then
+ PID=$(netstat -anp 2>/dev/null | grep ":${api_service_port} " | grep ' LISTEN ' | awk '{print $NF}' | sed "s|/.*||g")
+ if [ -n "$PID" ]; then
+ echo "The api_service_port" "$api_service_port" "is already occupied, PID:" "$PID"
+ occupied=true
+ fi
+ else
+ echo " Error: No necessary tool to check whether given port is occupied, stop ports checking"
+ echo " Please install 'lsof' or 'netstat'."
+ fi
+ if [ $occupied = true ]; then
+ echo "Exit because there are occupied ports."
+ exit 0
+ fi
+}
+
+init_env() {
+ if [ -f "$COLLECTOR_CONF/collector-env.sh" ]; then
+ if [ "x$PRINT_GC" != "x" ]; then
+ . "$COLLECTOR_CONF/collector-env.sh" "printgc"
+ else
+ . "$COLLECTOR_CONF/collector-env.sh"
+ fi
+ elif [ -f "${COLLECTOR_HOME}/sbin/collector-env.sh" ]; then
+ if [ "x$PRINT_GC" != "x" ]; then
+ . "${COLLECTOR_HOME}/sbin/collector-env.sh" "printgc"
+ else
+ . "${COLLECTOR_HOME}/sbin/collector-env.sh"
+ fi
+ else
+ echo "Can't find $COLLECTOR_CONF/collector-env.sh"
+ fi
+}
+
+get_iotdb_collector_include() {
+ #reset $1 to $* for this command
+ eval set -- "$1"
+ VARS=""
+ while true; do
+ case "$1" in
+ -D)
+      VARS="$VARS $(check_env_variables $2)"
+ shift 2
+ ;;
+ "")
+ #if we do not use getopt, we then have to process the case that there is no argument.
+ #in some systems, when there is no argument, shift command may throw error, so we skip directly
+ break
+ ;;
+ *)
+ VARS="$VARS $1"
+ shift
+ ;;
+ esac
+ done
+ echo "$VARS"
+}
\ No newline at end of file
diff --git a/iotdb-collector/collector-core/src/assembly/resources/sbin/start-collector.bat b/iotdb-collector/collector-core/src/assembly/resources/sbin/start-collector.bat
new file mode 100644
index 0000000..d40c579
--- /dev/null
+++ b/iotdb-collector/collector-core/src/assembly/resources/sbin/start-collector.bat
@@ -0,0 +1,208 @@
+@REM
+@REM Licensed to the Apache Software Foundation (ASF) under one
+@REM or more contributor license agreements. See the NOTICE file
+@REM distributed with this work for additional information
+@REM regarding copyright ownership. The ASF licenses this file
+@REM to you under the Apache License, Version 2.0 (the
+@REM "License"); you may not use this file except in compliance
+@REM with the License. You may obtain a copy of the License at
+@REM
+@REM http://www.apache.org/licenses/LICENSE-2.0
+@REM
+@REM Unless required by applicable law or agreed to in writing,
+@REM software distributed under the License is distributed on an
+@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+@REM KIND, either express or implied. See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM
+
+@echo off
+@REM set cmd format
+chcp 65001
+
+title IoTDB Collector
+
+echo ````````````````````````
+echo Starting IoTDB Collector
+echo ````````````````````````
+
+@REM -----------------------------------------------------------------------------
+@REM SET JAVA
+set PATH="%JAVA_HOME%\bin\";%PATH%
+set "FULL_VERSION="
+set "MAJOR_VERSION="
+set "MINOR_VERSION="
+
+
+for /f tokens^=2-5^ delims^=.-_+^" %%j in ('java -fullversion 2^>^&1') do (
+ set "FULL_VERSION=%%j-%%k-%%l-%%m"
+ IF "%%j" == "1" (
+ set "MAJOR_VERSION=%%k"
+ set "MINOR_VERSION=%%l"
+ ) else (
+ set "MAJOR_VERSION=%%j"
+ set "MINOR_VERSION=%%k"
+ )
+)
+
+set JAVA_VERSION=%MAJOR_VERSION%
+
+@REM we do not check jdk that version less than 1.8 because they are too stale...
+IF "%JAVA_VERSION%" == "6" (
+  echo IoTDB only supports jdk ^>= 8, please check your java version.
+ goto finally
+)
+IF "%JAVA_VERSION%" == "7" (
+  echo IoTDB only supports jdk ^>= 8, please check your java version.
+ goto finally
+)
+
+@REM -----------------------------------------------------------------------------
+@REM SET DIR
+if "%OS%" == "Windows_NT" setlocal
+
+pushd %~dp0..
+if NOT DEFINED COLLECTOR_HOME set COLLECTOR_HOME=%cd%
+popd
+
+SET enable_printgc=false
+IF "%1" == "printgc" (
+ SET enable_printgc=true
+ SHIFT
+)
+
+SET COLLECTOR_CONF=%COLLECTOR_HOME%\conf
+SET COLLECTOR_LOGS=%COLLECTOR_HOME%\logs
+
+@setlocal ENABLEDELAYEDEXPANSION ENABLEEXTENSIONS
+set is_conf_path=false
+for %%i in (%*) do (
+ IF "%%i" == "-c" (
+ set is_conf_path=true
+ ) ELSE IF "!is_conf_path!" == "true" (
+ set is_conf_path=false
+ set COLLECTOR_CONF=%%i
+ )
+)
+
+@REM SET CONFIG FILE
+IF EXIST "%COLLECTOR_CONF%\application.properties" (
+ set CONFIG_FILE="%COLLECTOR_CONF%\application.properties"
+) ELSE IF EXIST "%COLLECTOR_HOME%\conf\application.properties" (
+ set CONFIG_FILE="%COLLECTOR_HOME%\conf\application.properties"
+) ELSE (
+ set CONFIG_FILE=
+)
+
+@REM CHECK THE PORT USAGES
+IF DEFINED CONFIG_FILE (
+ for /f "eol=# tokens=2 delims==" %%i in ('findstr /i "^api_service_port"
+ "%CONFIG_FILE%"') do (
+ set api_service_port=%%i
+ )
+) ELSE (
+ echo "Can't find application.properties, check the default ports"
+ set api_service_port=17070
+)
+
+echo Check whether the ports are occupied....
+set occupied=0
+set api_service_port_occupied=0
+
+for /f "tokens=1,3,7 delims=: " %%i in ('netstat /ano') do (
+ if %%i==TCP (
+ if %%j==%api_service_port% (
+ if !api_service_port_occupied!==0 (
+ echo The api_service_port %api_service_port% is already occupied, pid:%%k
+ set occupied=1
+ set api_service_port_occupied=1
+ )
+ )
+ )
+)
+
+if %occupied%==1 (
+ echo There exists occupied port, please change the configuration.
+ TIMEOUT /T 10 /NOBREAK
+ exit 0
+)
+
+@setlocal ENABLEDELAYEDEXPANSION ENABLEEXTENSIONS
+set CONF_PARAMS=-s
+if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.collector.Application
+if NOT DEFINED JAVA_HOME goto :err
+
+@REM -----------------------------------------------------------------------------
+@REM JVM Opts we'll use in legacy run or installation
+@REM -DTSFILE_HOME="%COLLECTOR_HOME%"^
+@REM -DTSFILE_CONF="%COLLECTOR_CONF%"^
+set JAVA_OPTS=-ea^
+ -Dlogback.configurationFile="%COLLECTOR_CONF%\logback.xml"^
+ -DCOLLECTOR_HOME="%COLLECTOR_HOME%"^
+ -DCOLLECTOR_CONF="%COLLECTOR_CONF%"^
+ -DOFF_HEAP_MEMORY="%OFF_HEAP_MEMORY%"^
+ -Dsun.jnu.encoding=UTF-8^
+ -Dfile.encoding=UTF-8
+
+@REM ----------------------------------------------------------------------------
+@REM ***** CLASSPATH library setting *****
+@REM Ensure that any user defined CLASSPATH variables are not used on startup
+if EXIST "%COLLECTOR_HOME%\lib" (set CLASSPATH="%COLLECTOR_HOME%\lib\*") else set CLASSPATH="%COLLECTOR_HOME%\..\lib\*"
+
+@REM this special suffix 'collector.Application' is mandatory as stop-collector.bat uses it to filter the process id.
+set CLASSPATH=%CLASSPATH%;collector.Application
+goto okClasspath
+
+:append
+set CLASSPATH=%CLASSPATH%;%1
+goto :eof
+
+@REM -----------------------------------------------------------------------------
+:okClasspath
+
+rem echo CLASSPATH: %CLASSPATH%
+
+@REM ----------------------------------------------------------------------------
+@REM SET PARA
+
+@REM iotdb-server runs in foreground by default
+@REM set foreground=0
+set foreground=yes
+
+:checkPara
+set COMMANSLINE=%*
+@REM setlocal ENABLEDELAYEDEXPANSION
+:STR_VISTOR
+for /f "tokens=1* delims= " %%a in ("%COMMANSLINE%") do (
+@REM -----more para-----
+for /f "tokens=1* delims==" %%1 in ("%%a") do (
+@REM echo 1=%%1 "|||" 2=%%2
+@REM if "%%1"=="-v" ( java %JAVA_OPTS% -Dlogback.configurationFile="%COLLECTOR_CONF%/logback-tool.xml" -cp %CLASSPATH% org.apache.iotdb.db.service.GetVersion & goto finally )
+if "%%1"=="-f" ( set foreground=yes)
+if "%%1"=="-d" ( set foreground=0)
+)
+set COMMANSLINE=%%b
+goto STR_VISTOR
+)
+
+@REM ----------------------------------------------------------------------------
+@REM START
+:start
+if %foreground%==yes (
+ java %ILLEGAL_ACCESS_PARAMS% %JAVA_OPTS% %COLLECTOR_HEAP_OPTS% -cp %CLASSPATH% %COLLECTOR_JMX_OPTS% %MAIN_CLASS% %CONF_PARAMS%
+ ) ELSE (
+ start javaw %ILLEGAL_ACCESS_PARAMS% %JAVA_OPTS% %COLLECTOR_HEAP_OPTS% -cp %CLASSPATH% %COLLECTOR_JMX_OPTS% %MAIN_CLASS% %CONF_PARAMS%
+ )
+
+goto finally
+
+:err
+echo JAVA_HOME environment variable must be set!
+pause
+
+
+@REM -----------------------------------------------------------------------------
+:finally
+@ENDLOCAL
+pause
\ No newline at end of file
diff --git a/iotdb-collector/collector-core/src/assembly/resources/sbin/start-collector.sh b/iotdb-collector/collector-core/src/assembly/resources/sbin/start-collector.sh
new file mode 100644
index 0000000..61935ac
--- /dev/null
+++ b/iotdb-collector/collector-core/src/assembly/resources/sbin/start-collector.sh
@@ -0,0 +1,237 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+source "$(dirname "$0")/common.sh"
+
+# iotdb collector server runs on foreground by default
+foreground="yes"
+
+if [ $# -ne 0 ]; then
+ echo "All parameters are $*"
+fi
+
+while true; do
+ case "$1" in
+ -c)
+ COLLECTOR_CONF="$2"
+ shift 2
+ ;;
+ -p)
+ pidfile="$2"
+ shift 2
+ ;;
+ -f)
+ foreground="yes"
+ shift
+ ;;
+ -d)
+ foreground=""
+ shift
+ ;;
+ -g)
+ PRINT_GC="yes"
+ shift
+ ;;
+ -h)
+ echo "Usage: $0 [-v] [-f] [-d] [-h] [-p pidfile] [-c configFolder] [-H HeapDumpPath] [-E JvmErrorFile] [printgc]"
+ exit 0
+ ;;
+ -v)
+ SHOW_VERSION="yes"
+ break
+ ;;
+ --)
+ shift
+ #all others are args to the program
+ PARAMS=$*
+ break
+ ;;
+ "")
+ #if we do not use getopt, we then have to process the case that there is no argument.
+ #in some systems, when there is no argument, shift command may throw error, so we skip directly
+ #all others are args to the program
+ PARAMS=$*
+ break
+ ;;
+ *)
+ echo "Error parsing arguments! Unknown argument \"$1\"" >&2
+ exit 1
+ ;;
+ esac
+done
+
+if [ "$(id -u)" -ne 0 ]; then
+ echo "Notice: in some systems, DataNode must run in sudo mode to write data. The process may fail."
+fi
+
+#check_all_variables is in common.sh
+check_all_variables
+
+#check_collector_port_usages is in common.sh
+check_collector_port_usages
+
+CLASSPATH=""
+for f in "${COLLECTOR_HOME}"/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}":"$f
+done
+
+classname=org.apache.iotdb.collector.Application
+
+if [ "x$SHOW_VERSION" != "x" ]; then
+ COLLECTOR_LOG_CONFIG="${COLLECTOR_CONF}/logback.xml"
+ # find java in JAVA_HOME
+ if [ -n "$JAVA_HOME" ]; then
+ for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do
+ if [ -x "$java" ]; then
+ JAVA="$java"
+ break
+ fi
+ done
+ else
+ JAVA=java
+ fi
+ exit 0
+fi
+
+echo ---------------------
+echo "Starting IoTDB Collector"
+echo ---------------------
+
+#init_env is in common.sh
+init_env
+
+# check whether we can enable heap dump when oom
+if [ "x$COLLECTOR_ALLOW_HEAP_DUMP" == "xtrue" ]; then
+ COLLECTOR_JVM_OPTS="$COLLECTOR_JVM_OPTS $COLLECTOR_HEAP_DUMP_COMMAND"
+fi
+
+PARAMS="-s $PARAMS"
+
+classname=org.apache.iotdb.collector.Application
+
+launch_service() {
+ class="$1"
+ collector_parms="-Dlogback.configurationFile=${COLLECTOR_LOG_CONFIG}"
+ collector_parms="$collector_parms -DCOLLECTOR_HOME=${COLLECTOR_HOME}"
+ collector_parms="$collector_parms -DCOLLECTOR_DATA_HOME=${COLLECTOR_DATA_HOME}"
+ collector_parms="$collector_parms -DTSFILE_HOME=${COLLECTOR_HOME}"
+ collector_parms="$collector_parms -DCOLLECTOR_CONF=${COLLECTOR_CONF}"
+ collector_parms="$collector_parms -DTSFILE_CONF=${COLLECTOR_CONF}"
+ collector_parms="$collector_parms -Dname=collector\.IoTDB"
+ collector_parms="$collector_parms -DCOLLECTOR_LOG_DIR=${COLLECTOR_LOG_DIR}"
+ collector_parms="$collector_parms -DOFF_HEAP_MEMORY=${OFF_HEAP_MEMORY}"
+
+ if [ "x$pidfile" != "x" ]; then
+    collector_parms="$collector_parms -Dcollector-pidfile=$pidfile"
+ fi
+
+ if [ "x$foreground" == "xyes" ]; then
+ collector_parms="$collector_parms -Dcollector-foreground=yes"
+ if [ "x$JVM_ON_OUT_OF_MEMORY_ERROR_OPT" != "x" ]; then
+ [ ! -z "$pidfile" ] && printf "%d" $! > "$pidfile"
+ exec $NUMACTL "$JAVA" $JVM_OPTS "$JVM_ON_OUT_OF_MEMORY_ERROR_OPT" $illegal_access_params $collector_parms $COLLECTOR_JMX_OPTS -cp "$CLASSPATH" $COLLECTOR_JVM_OPTS "$class" $PARAMS
+ else
+ [ ! -z "$pidfile" ] && printf "%d" $! > "$pidfile"
+ exec $NUMACTL "$JAVA" $JVM_OPTS $illegal_access_params $collector_parms $COLLECTOR_JMX_OPTS -cp "$CLASSPATH" $COLLECTOR_JVM_OPTS "$class" $PARAMS
+ fi
+ # Startup IoTDB, background it, and write the pid.
+ else
+ if [ "x$JVM_ON_OUT_OF_MEMORY_ERROR_OPT" != "x" ]; then
+ exec $NUMACTL "$JAVA" $JVM_OPTS "$JVM_ON_OUT_OF_MEMORY_ERROR_OPT" $illegal_access_params $collector_parms $COLLECTOR_JMX_OPTS -cp "$CLASSPATH" $COLLECTOR_JVM_OPTS "$class" $PARAMS 2>&1 > /dev/null <&- &
+ [ ! -z "$pidfile" ] && printf "%d" $! > "$pidfile"
+ true
+ else
+ exec $NUMACTL "$JAVA" $JVM_OPTS $illegal_access_params $collector_parms $COLLECTOR_JMX_OPTS -cp "$CLASSPATH" $COLLECTOR_JVM_OPTS "$class" $PARAMS 2>&1 > /dev/null <&- &
+ [ ! -z "$pidfile" ] && printf "%d" $! > "$pidfile"
+ true
+ fi
+ fi
+
+ return $?
+}
+
+# check whether tool 'lsof' exists
+check_tool_env() {
+ if ! type lsof > /dev/null 2>&1 ; then
+ echo ""
+ echo " Warning: No tool 'lsof', Please install it."
+ echo " Note: Some checking function need 'lsof'."
+ echo ""
+ return 1
+ else
+ return 0
+ fi
+}
+
+# convert path to real full-path.
+# e.g., /a/b/c/.. will return /a/b
+# If path has been deleted, return ""
+get_real_path() {
+ local path=$1
+ local real_path=""
+ cd $path > /dev/null 2>&1
+ if [ $? -eq 0 ] ; then
+ real_path=$(pwd -P)
+ cd - > /dev/null 2>&1
+ fi
+ echo "${real_path}"
+}
+
+# check whether same directory's IoTDB node process has been running
+check_running_process() {
+ check_tool_env
+
+ PIDS=$(ps ax | grep "$classname" | grep java | grep DCOLLECTOR_HOME | grep -v grep | awk '{print $1}')
+ for pid in ${PIDS}
+ do
+ run_conf_path=""
+ # find the abstract path of the process
+ run_cwd=$(lsof -p $pid 2>/dev/null | awk '$4~/cwd/ {print $NF}')
+ # find "-DCOLLECTOR_HOME=XXX" from the process command
+ run_home_path=$(ps -fp $pid | sed "s/ /\n/g" | sed -n "s/-DCOLLECTOR_HOME=//p")
+ run_home_path=$(get_real_path "${run_cwd}/${run_home_path}")
+
+ #if dir ${run_home_path} has been deleted
+ if [ "${run_home_path}" == "" ]; then
+ continue
+ fi
+
+ current_home_path=$(get_real_path ${COLLECTOR_HOME})
+ if [ "${run_home_path}" == "${current_home_path}" ]; then
+ echo ""
+ echo " Found running IoTDB Collector (PID=$pid)." >&2
+ echo " Can not run duplicated IoTDB Collector!" >&2
+ echo " Exit..." >&2
+ echo ""
+ exit 1
+ fi
+ done
+}
+
+check_tool_env
+# If needed tool is ready, check whether same directory's IoTDB node is running
+if [ $? -eq 0 ]; then
+ check_running_process
+fi
+
+# Start up the service
+launch_service "$classname"
+
+exit $?
diff --git a/iotdb-collector/collector-core/src/assembly/resources/sbin/stop-collector.bat b/iotdb-collector/collector-core/src/assembly/resources/sbin/stop-collector.bat
new file mode 100644
index 0000000..e8daee2
--- /dev/null
+++ b/iotdb-collector/collector-core/src/assembly/resources/sbin/stop-collector.bat
@@ -0,0 +1,67 @@
+@REM
+@REM Licensed to the Apache Software Foundation (ASF) under one
+@REM or more contributor license agreements. See the NOTICE file
+@REM distributed with this work for additional information
+@REM regarding copyright ownership. The ASF licenses this file
+@REM to you under the Apache License, Version 2.0 (the
+@REM "License"); you may not use this file except in compliance
+@REM with the License. You may obtain a copy of the License at
+@REM
+@REM http://www.apache.org/licenses/LICENSE-2.0
+@REM
+@REM Unless required by applicable law or agreed to in writing,
+@REM software distributed under the License is distributed on an
+@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+@REM KIND, either express or implied. See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM
+
+@echo off
+
+set current_dir=%~dp0
+set superior_dir=%current_dir%\..\
+
+IF EXIST "%superior_dir%\conf\application.properties" (
+ set config_file="%superior_dir%\conf\application.properties"
+) ELSE (
+    @REM NOTE review: an inner IF EXIST that re-tested the exact same
+    @REM application.properties path was removed here -- it could never
+    @REM succeed after the outer test had already failed, so we report
+    @REM the missing configuration file directly.
+    echo No configuration file found. Exiting.
+    exit /b 1
+)
+
+if not defined config_file (
+ echo No configuration file found. Exiting.
+ exit /b 1
+)
+
+for /f "eol=# tokens=2 delims==" %%i in ('findstr /i "^api_service_port"
+"%config_file%"') do (
+ set api_service_port=%%i
+)
+
+if not defined api_service_port (
+ echo "WARNING: api_service_port not found in the configuration file. Using default value api_service_port = 17070"
+ set api_service_port=17070
+)
+
+echo Check whether the api_service_port is used..., port is %api_service_port%
+
+for /f "eol=# tokens=2 delims==" %%i in ('findstr /i "api_service_address"
+"%config_file%"') do (
+ set api_service_address=%%i
+)
+
+if not defined api_service_address (
+ echo "WARNING: api_service_address not found in the configuration file. Using default value api_service_address = 0.0.0.0"
+ set api_service_address=0.0.0.0
+)
+
+for /f "tokens=5" %%a in ('netstat /ano ^| findstr %api_service_address%:%api_service_port%') do (
+ taskkill /f /pid %%a
+ echo Close DataNode, PID: %%a
+)
+rem ps ax | grep -i 'iotdb.Application' | grep -v grep | awk '{print $1}' | xargs kill -SIGTERM
diff --git a/iotdb-collector/collector-core/src/assembly/resources/sbin/stop-collector.sh b/iotdb-collector/collector-core/src/assembly/resources/sbin/stop-collector.sh
new file mode 100644
index 0000000..51322fa
--- /dev/null
+++ b/iotdb-collector/collector-core/src/assembly/resources/sbin/stop-collector.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+source "$(dirname "$0")/common.sh"
+COLLECTOR_CONF="`dirname "$0"`/../conf"
+
+if [ -f "${COLLECTOR_CONF}/application.properties" ]; then
+ api_service_port=`sed '/^api_service_port=/!d;s/.*=//' ${COLLECTOR_CONF}/application.properties | tr -d '\r'`
+# NOTE(review): removed an else branch that ran the identical sed command on
+# the same missing file; the api_service_port=17070 default below covers it.
+fi
+
+if [ -z "$api_service_port" ]; then
+ echo "WARNING: api_service_port not found in the configuration file. Using default value api_service_port=17070"
+ api_service_port=17070
+fi
+
+check_config_unique "api_service_port" "$api_service_port"
+
+force=""
+
+while true; do
+ case "$1" in
+ -f)
+ force="yes"
+ break
+ ;;
+ "")
+ #if we do not use getopt, we then have to process the case that there is no argument.
+ #in some systems, when there is no argument, shift command may throw error, so we skip directly
+ #all others are args to the program
+ PARAMS=$*
+ break
+ ;;
+ esac
+done
+
+echo "Check whether the api_service_port is used..., port is" $api_service_port
+
+if type lsof > /dev/null 2>&1 ; then
+ PID=$(lsof -t -i:"${api_service_port}" -sTCP:LISTEN)
+elif type netstat > /dev/null 2>&1 ; then
+ PID=$(netstat -anp 2>/dev/null | grep ":${api_service_port} " | grep ' LISTEN ' | awk '{print $NF}' | sed "s|/.*||g" )
+else
+ echo ""
+ echo " Error: No necessary tool."
+ echo " Please install 'lsof' or 'netstat'."
+ exit 1
+fi
+
+PID_VERIFY=$(ps ax | grep -i 'Application' | grep java | grep -v grep | awk '{print $1}')
+if [ -z "$PID" ]; then
+ echo "No Application to stop"
+ if [ "$(id -u)" -ne 0 ]; then
+ echo "Maybe you can try to run in sudo mode to detect the process."
+ fi
+ exit 1
+elif [[ "${PID_VERIFY}" =~ ${PID} ]]; then
+ if [[ "${force}" == "yes" ]]; then
+ kill -9 "$PID"
+ echo "Force to stop Application, PID:" "$PID"
+ else
+ kill -s TERM "$PID"
+ echo "Stop Application, PID:" "$PID"
+ fi
+else
+ echo "No Application to stop"
+ exit 1
+fi
\ No newline at end of file
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/Application.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/Application.java
index 650caee..80b5b42 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/Application.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/Application.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.collector.config.Configuration;
import org.apache.iotdb.collector.service.ApiService;
import org.apache.iotdb.collector.service.IService;
+import org.apache.iotdb.collector.service.PeriodicalJobService;
import org.apache.iotdb.collector.service.PersistenceService;
import org.apache.iotdb.collector.service.RuntimeService;
@@ -41,6 +42,7 @@ private Application() {
services.add(new RuntimeService());
services.add(new ApiService());
services.add(new PersistenceService());
+ services.add(new PeriodicalJobService());
}
public static void main(String[] args) {
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/Configuration.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/Configuration.java
index 115c1c0..83b84d3 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/Configuration.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/Configuration.java
@@ -22,10 +22,12 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
+import java.net.MalformedURLException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Optional;
@@ -36,6 +38,8 @@ public class Configuration {
private static final Logger LOGGER = LoggerFactory.getLogger(Configuration.class);
private static final String CONFIG_FILE_NAME = "application.properties";
+ public static final String COLLECTOR_CONF = "COLLECTOR_CONF";
+ public static final String COLLECTOR_HOME = "COLLECTOR_HOME";
private final Options options = new Options();
@@ -69,19 +73,44 @@ private void loadProps() {
}
private Optional getPropsUrl() {
- final URL url = Options.class.getResource("/" + CONFIG_FILE_NAME);
+ String urlString = getConfDir();
+ if (urlString == null) {
+ final URL uri = Options.class.getResource("/" + CONFIG_FILE_NAME);
+ if (uri != null) {
+ return Optional.of(uri);
+ } else {
+ LOGGER.warn(
+            "Cannot find COLLECTOR_CONF or COLLECTOR_HOME system property when loading "
+ + "config file {}, use default configuration",
+ CONFIG_FILE_NAME);
+ return Optional.empty();
+ }
+ } else if (!urlString.endsWith(".properties")) {
+ urlString += (File.separatorChar + CONFIG_FILE_NAME);
+ }
- if (url != null) {
- return Optional.of(url);
- } else {
- LOGGER.warn(
- "Cannot find IOTDB_COLLECTOR_HOME or IOTDB_COLLECTOR_CONF environment variable when loading "
- + "config file {}, use default configuration",
- CONFIG_FILE_NAME);
+ try {
+ if (!urlString.startsWith("file:") && !urlString.startsWith("classpath:")) {
+ urlString = "file:" + urlString;
+ }
+ return Optional.of(new URL(urlString));
+ } catch (final MalformedURLException e) {
+ LOGGER.warn("get url failed", e);
return Optional.empty();
}
}
+ public String getConfDir() {
+ String confString = System.getProperty(COLLECTOR_CONF, null);
+ if (confString == null) {
+ confString = System.getProperty(COLLECTOR_HOME, null);
+ if (confString != null) {
+ confString = confString + File.separatorChar + "conf";
+ }
+ }
+ return confString;
+ }
+
public void logAllOptions() {
options.logAllOptions();
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/Options.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/Options.java
index 16fd814..ce262d6 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/Options.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/Options.java
@@ -22,10 +22,13 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.File;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
+import static org.apache.iotdb.collector.config.Configuration.COLLECTOR_HOME;
+
public class Options {
private static final Logger LOGGER = LoggerFactory.getLogger(Options.class);
@@ -37,6 +40,7 @@ public class Options {
Class.forName(ApiServiceOptions.class.getName());
Class.forName(TaskRuntimeOptions.class.getName());
Class.forName(PluginRuntimeOptions.class.getName());
+ Class.forName(PipeRuntimeOptions.class.getName());
} catch (final ClassNotFoundException e) {
throw new RuntimeException("Failed to load options", e);
}
@@ -73,6 +77,15 @@ public T value() {
public abstract void setValue(final String valueString);
+ protected String addHomeDir(String dir) {
+ final String homeDir = System.getProperty(COLLECTOR_HOME, null);
+ if (!new File(dir).isAbsolute() && homeDir != null && !homeDir.isEmpty()) {
+ dir =
+ !homeDir.endsWith(File.separator) ? homeDir + File.separatorChar + dir : homeDir + dir;
+ }
+ return dir;
+ }
+
@Override
public String toString() {
return key + " = " + value();
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/PipeRuntimeOptions.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/PipeRuntimeOptions.java
new file mode 100644
index 0000000..0abd49a
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/PipeRuntimeOptions.java
@@ -0,0 +1,283 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.config;
+
+import org.apache.iotdb.rpc.RpcUtils;
+
+import java.util.concurrent.TimeUnit;
+
+public class PipeRuntimeOptions extends Options {
+ public static final Option PIPE_ALL_SINK_RATE_LIMIT_BYTES_PER_SECOND =
+ new Option("pipe_all_sinks_rate_limit_bytes_per_second", -1d) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Double.parseDouble(valueString);
+ }
+ };
+
+ public static final Option RATE_LIMITER_HOT_RELOAD_CHECK_INTERVAL_MS =
+ new Option("rate_limiter_hot_reload_check_interval_ms", 1000) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Integer.parseInt(valueString);
+ }
+ };
+
+ public static final Option MAX_RETRY_TIMES =
+ new Option("max_retry_times", 5) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Integer.parseInt(valueString);
+ }
+ };
+
+ public static final Option RPC_PORT =
+ new Option("rpc_port", 6667) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Integer.parseInt(valueString);
+ }
+ };
+
+ public static final Option RPC_ADDRESS =
+ new Option("rpc_address", "0.0.0.0") {
+ @Override
+ public void setValue(final String valueString) {
+ value = valueString;
+ }
+ };
+
+ public static final Option PIPE_CONNECTOR_READ_FILE_BUFFER_SIZE =
+ new Option("pipe_connector_read_file_buffer_size", 8388608) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Integer.parseInt(valueString);
+ }
+ };
+
+ public static final Option PIPE_CONNECTOR_TRANSFER_TIMEOUT_MS =
+ new Option("pipe_connector_transfer_timeout_ms", 15 * 60 * 1000) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Integer.parseInt(valueString);
+ }
+ };
+
+  public static final Option THRIFT_FRAME_MAX_SIZE =
+      new Option("thrift_frame_max_size", 536870912) { // 512 MB (2^29); 546870912 was a typo
+        @Override
+        public void setValue(final String valueString) {
+          value = Integer.parseInt(valueString);
+        }
+      };
+
+ public static final Option IS_PIPE_CONNECTOR_RPC_THRIFT_COMPRESSION_ENABLED =
+ new Option("is_pipe_connector_rpc_thrift_compression_enabled", false) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Boolean.parseBoolean(valueString);
+ }
+ };
+
+ public static final Option TIMESTAMP_PRECISION =
+ new Option("timestamp_precision", "ms") {
+ @Override
+ public void setValue(final String valueString) {
+ value = valueString;
+ }
+ };
+
+ public static final Option CLUSTER_ID =
+ new Option("cluster_id", "") {
+ @Override
+ public void setValue(final String valueString) {
+ value = valueString;
+ }
+ };
+
+  public static final Option PIPE_LEADER_CACHE_MEMORY_USAGE_PERCENTAGE =
+      new Option("pipe_leader_cache_memory_usage_percentage", 0.1f) {
+        @Override
+        public void setValue(final String valueString) { // final: consistent with sibling options
+          value = Float.parseFloat(valueString);
+        }
+      };
+
+  public static final Option PIPE_EVENT_REFERENCE_TRACKING_ENABLED =
+      new Option("pipe_event_reference_tracking_enabled", true) {
+        @Override
+        public void setValue(final String valueString) { // final: consistent with sibling options
+          value = Boolean.parseBoolean(valueString);
+        }
+      };
+
+ public static volatile Option PIPE_CHECK_MEMORY_ENOUGH_INTERVAL_MS =
+ new Option("pipe_check_memory_enough_interval_ms", 10L) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Long.parseLong(valueString);
+ }
+ };
+
+ public static final Option PIPE_CONNECTOR_READ_FILE_BUFFER_MEMORY_CONTROL =
+ new Option("pipe_connector_read_file_buffer_memory_control", false) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Boolean.parseBoolean(valueString);
+ }
+ };
+
+ public static final Option PIPE_SUBTASK_EXECUTOR_MAX_THREAD_NUM =
+ new Option(
+ "pipe_subtask_executor_max_thread_num",
+ Math.max(5, Runtime.getRuntime().availableProcessors() / 2)) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Integer.parseInt(valueString);
+ }
+ };
+
+ public static final Option PIPE_DATA_STRUCTURE_TABLET_SIZE_IN_BYTES =
+ new Option("pipe_data_structure_tablet_size_in_bytes", 2097152) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Integer.parseInt(valueString);
+ }
+ };
+
+ public static final Option PIPE_DATA_STRUCTURE_TABLET_ROW_SIZE =
+ new Option("pipe_data_structure_tablet_row_size", 2048) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Integer.parseInt(valueString);
+ }
+ };
+
+ public static final Option PIPE_AIR_GAP_RECEIVER_ENABLED =
+ new Option("pipe_air_gap_receiver_enabled", true) {
+
+ @Override
+ public void setValue(final String valueString) {
+ value = Boolean.parseBoolean(valueString);
+ }
+ };
+
+ public static final Option PIPE_AIR_GAP_RECEIVER_PORT =
+ new Option("pipe_air_gap_receiver_port", 9780) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Integer.parseInt(valueString);
+ }
+ };
+
+ public static final Option PIPE_SUBTASK_EXECUTOR_PENDING_QUEUE_MAX_BLOCKING_TIME_MS =
+ new Option("pipe_subtask_executor_pending_queue_max_blocking_time_ms", 1000L) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Long.parseLong(valueString);
+ }
+ };
+
+ public static final Option TIMESTAMP_PRECISION_CHECK_ENABLED =
+ new Option("timestamp_precision_check_enabled", true) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Boolean.parseBoolean(valueString);
+ }
+ };
+
+ public static final Option DN_CONNECTION_TIMEOUT_IN_MS =
+ new Option("dn_connection_timeout_in_ms", (int) TimeUnit.SECONDS.toMillis(60)) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Integer.parseInt(valueString);
+ }
+ };
+
+ public static final Option IS_RPC_THRIFT_COMPRESSION_ENABLED =
+ new Option("is_rpc_thrift_compression_enabled", false) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Boolean.parseBoolean(valueString);
+ }
+ };
+
+ public static final Option PIPE_CONNECTOR_REQUEST_SLICE_THRESHOLD_BYTES =
+ new Option(
+ "pipe_connector_request_slice_threshold_bytes",
+ (int) (RpcUtils.THRIFT_FRAME_MAX_SIZE * 0.8)) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Integer.parseInt(valueString);
+ }
+ };
+
+ public static final Option PIPE_CONNECTOR_HANDSHAKE_TIMEOUT_MS =
+ new Option("pipe_connector_handshake_timeout_ms", 10 * 1000) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Integer.parseInt(valueString);
+ }
+ };
+
+ public static final Option PIPE_CONNECTOR_RPC_THRIFT_COMPRESSION_ENABLED =
+ new Option("pipe_connector_rpc_thrift_compression_enabled", false) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Boolean.parseBoolean(valueString);
+ }
+ };
+
+ public static final Option PIPE_ASYNC_CONNECTOR_SELECTOR_NUMBER =
+ new Option(
+ "pipe_async_connector_selector_number",
+ Math.max(4, Runtime.getRuntime().availableProcessors() / 2)) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Integer.parseInt(valueString);
+ }
+ };
+
+ public static final Option PIPE_ASYNC_CONNECTOR_MAX_CLIENT_NUMBER =
+ new Option(
+ "pipe_async_connector_max_client_number",
+ Math.max(16, Runtime.getRuntime().availableProcessors() / 2)) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Integer.parseInt(valueString);
+ }
+ };
+
+ public static volatile Option DATA_NODE_ID =
+ new Option("data_node_id", -1) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Integer.parseInt(valueString);
+ }
+ };
+
+ public static final Option EXECUTOR_CRON_HEARTBEAT_EVENT_INTERVAL_SECONDS =
+ new Option("executor_cron_heartbeat_event_interval_seconds", 20L) {
+ @Override
+ public void setValue(final String valueString) {
+ value = Long.parseLong(valueString);
+ }
+ };
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/PluginRuntimeOptions.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/PluginRuntimeOptions.java
index 92eb04b..92a745e 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/PluginRuntimeOptions.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/PluginRuntimeOptions.java
@@ -23,10 +23,10 @@
public class PluginRuntimeOptions extends Options {
public static final Option PLUGIN_LIB_DIR =
- new Option("plugin_lib_dir", "ext" + File.separator + "plugin") {
+ new Option("plugin_lib_dir", "system" + File.separator + "plugin") {
@Override
public void setValue(final String valueString) {
- value = valueString;
+ value = addHomeDir(valueString);
}
};
@@ -35,7 +35,17 @@ public void setValue(final String valueString) {
"plugin_install_lib_dir", PLUGIN_LIB_DIR.value() + File.separator + "install") {
@Override
public void setValue(final String valueString) {
- value = valueString;
+ value = addHomeDir(valueString);
+ }
+ };
+
+ public static final Option PLUGIN_DATABASE_FILE_PATH =
+ new Option(
+ "plugin_database_file_path",
+ "system" + File.separator + "database" + File.separator + "plugin.db") {
+ @Override
+ public void setValue(final String valueString) {
+ value = addHomeDir(valueString);
}
};
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/TaskRuntimeOptions.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/TaskRuntimeOptions.java
index 70efcc9..dc35f33 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/TaskRuntimeOptions.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/config/TaskRuntimeOptions.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.collector.config;
+import java.io.File;
+
public class TaskRuntimeOptions extends Options {
public static final Option TASK_SOURCE_PARALLELISM_NUM =
@@ -48,7 +50,7 @@ public void setValue(final String valueString) {
public static final Option TASK_PROCESSOR_RING_BUFFER_SIZE =
new Option("task_processor_ring_buffer_size", 1024) {
@Override
- public void setValue(String valueString) {
+ public void setValue(final String valueString) {
value = Integer.parseInt(valueString);
}
};
@@ -56,8 +58,18 @@ public void setValue(String valueString) {
public static final Option TASK_SINK_RING_BUFFER_SIZE =
new Option("task_sink_ring_buffer_size", 1024) {
@Override
- public void setValue(String valueString) {
+ public void setValue(final String valueString) {
value = Integer.parseInt(valueString);
}
};
+
+ public static final Option TASK_DATABASE_FILE_PATH =
+ new Option(
+ "task_database_file_path",
+ "system" + File.separator + "database" + File.separator + "task.db") {
+ @Override
+ public void setValue(final String valueString) {
+ value = addHomeDir(valueString);
+ }
+ };
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/persistence/DBConstant.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/persistence/DBConstant.java
index 2e8c3bb..5e18ce1 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/persistence/DBConstant.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/persistence/DBConstant.java
@@ -19,6 +19,9 @@
package org.apache.iotdb.collector.persistence;
+import org.apache.iotdb.collector.config.PluginRuntimeOptions;
+import org.apache.iotdb.collector.config.TaskRuntimeOptions;
+
public class DBConstant {
public static final String CREATE_PLUGIN_TABLE_SQL =
@@ -41,9 +44,11 @@ public class DBConstant {
+ " create_time TEXT NOT NULL\n"
+ ");";
- public static final String PLUGIN_DATABASE_FILE_PATH = "ext/db/plugin.db";
- public static final String TASK_DATABASE_FILE_PATH = "ext/db/task.db";
+  public static final String PLUGIN_DATABASE_FILE_PATH = "system/database/plugin.db"; // NOTE(review): duplicates PluginRuntimeOptions.PLUGIN_DATABASE_FILE_PATH default — confirm still needed
+  public static final String TASK_DATABASE_FILE_PATH = "system/database/task.db"; // NOTE(review): duplicates TaskRuntimeOptions.TASK_DATABASE_FILE_PATH default — confirm still needed
- public static final String PLUGIN_DATABASE_URL = "jdbc:sqlite:" + PLUGIN_DATABASE_FILE_PATH;
- public static final String TASK_DATABASE_URL = "jdbc:sqlite:" + TASK_DATABASE_FILE_PATH;
+ public static final String PLUGIN_DATABASE_URL =
+ "jdbc:sqlite:" + PluginRuntimeOptions.PLUGIN_DATABASE_FILE_PATH.value();
+ public static final String TASK_DATABASE_URL =
+ "jdbc:sqlite:" + TaskRuntimeOptions.TASK_DATABASE_FILE_PATH.value();
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/persistence/Persistence.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/persistence/Persistence.java
index b2abcb8..72d5670 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/persistence/Persistence.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/persistence/Persistence.java
@@ -29,6 +29,7 @@ public abstract class Persistence {
public Persistence(final String databaseUrl) {
this.databaseUrl = databaseUrl;
+
initDatabaseFileIfPossible();
initTableIfPossible();
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/persistence/PluginPersistence.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/persistence/PluginPersistence.java
index d1d2b20..f575fd9 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/persistence/PluginPersistence.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/persistence/PluginPersistence.java
@@ -19,8 +19,9 @@
package org.apache.iotdb.collector.persistence;
-import org.apache.iotdb.collector.runtime.plugin.utils.PluginFileUtils;
+import org.apache.iotdb.collector.config.PluginRuntimeOptions;
import org.apache.iotdb.collector.service.RuntimeService;
+import org.apache.iotdb.collector.utils.PluginFileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -49,7 +50,8 @@ public PluginPersistence(String databaseUrl) {
@Override
protected void initDatabaseFileIfPossible() {
try {
- final Path pluginDatabaseFilePath = Paths.get(DBConstant.PLUGIN_DATABASE_FILE_PATH);
+ final Path pluginDatabaseFilePath =
+ Paths.get(PluginRuntimeOptions.PLUGIN_DATABASE_FILE_PATH.value());
if (!Files.exists(pluginDatabaseFilePath)) {
Files.createFile(pluginDatabaseFilePath);
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/persistence/TaskPersistence.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/persistence/TaskPersistence.java
index bf62519..b911fe1 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/persistence/TaskPersistence.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/persistence/TaskPersistence.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.collector.persistence;
+import org.apache.iotdb.collector.config.TaskRuntimeOptions;
import org.apache.iotdb.collector.runtime.task.TaskStateEnum;
import org.apache.iotdb.collector.service.RuntimeService;
import org.apache.iotdb.tsfile.utils.PublicBAOS;
@@ -55,7 +56,8 @@ public TaskPersistence(String databaseUrl) {
@Override
protected void initDatabaseFileIfPossible() {
try {
- final Path taskDatabaseFilePath = Paths.get(DBConstant.TASK_DATABASE_FILE_PATH);
+ final Path taskDatabaseFilePath =
+ Paths.get(TaskRuntimeOptions.TASK_DATABASE_FILE_PATH.value());
if (!Files.exists(taskDatabaseFilePath)) {
Files.createFile(taskDatabaseFilePath);
}
@@ -119,6 +121,7 @@ public void tryRecoverTask(
if (Objects.isNull(response) || response.getStatus() != Response.Status.OK.getStatusCode()) {
LOGGER.warn("Failed to recover task persistence message, because {}", response);
+ tryDeleteTask(taskId);
}
}
@@ -160,6 +163,8 @@ public void tryPersistenceTask(
statement.setBytes(5, sinkAttributeBuffer);
statement.setString(6, String.valueOf(new Timestamp(System.currentTimeMillis())));
statement.executeUpdate();
+
+ LOGGER.info("successfully persisted task {} info", taskId);
} catch (final SQLException | IOException e) {
LOGGER.warn("Failed to persistence task message, because {}", e.getMessage());
}
@@ -186,6 +191,8 @@ public void tryDeleteTask(final String taskId) {
final PreparedStatement statement = connection.prepareStatement(deleteSQL);
statement.setString(1, taskId);
statement.executeUpdate();
+
+ LOGGER.info("successfully deleted task {}", taskId);
} catch (final SQLException e) {
LOGGER.warn("Failed to delete task persistence message, because {}", e.getMessage());
}
@@ -199,6 +206,8 @@ public void tryAlterTaskState(final String taskId, final TaskStateEnum taskState
statement.setInt(1, taskState.getTaskState());
statement.setString(2, taskId);
statement.executeUpdate();
+
+ LOGGER.info("successfully altered task {}", taskId);
} catch (SQLException e) {
LOGGER.warn("Failed to alter task persistence message, because {}", e.getMessage());
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/api/PullSource.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/api/PullSource.java
index e15a493..47313ee 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/api/PullSource.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/api/PullSource.java
@@ -20,5 +20,15 @@
package org.apache.iotdb.collector.plugin.api;
import org.apache.iotdb.pipe.api.PipeSource;
+import org.apache.iotdb.pipe.api.customizer.configuration.PipeExtractorRuntimeConfiguration;
+import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
-public abstract class PullSource implements PipeSource {}
+public abstract class PullSource implements PipeSource {
+
+ @Override
+ public final void customize(
+ PipeParameters pipeParameters,
+ PipeExtractorRuntimeConfiguration pipeExtractorRuntimeConfiguration) {
+ throw new UnsupportedOperationException();
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/api/PushSource.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/api/PushSource.java
index e8eebf4..7663c09 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/api/PushSource.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/api/PushSource.java
@@ -21,6 +21,8 @@
import org.apache.iotdb.pipe.api.PipeSource;
import org.apache.iotdb.pipe.api.collector.EventCollector;
+import org.apache.iotdb.pipe.api.customizer.configuration.PipeExtractorRuntimeConfiguration;
+import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
import org.apache.iotdb.pipe.api.event.Event;
public abstract class PushSource implements PipeSource {
@@ -31,6 +33,14 @@ public PushSource() {
this.collector = null;
}
+ @Override
+ public final void customize(
+ PipeParameters pipeParameters,
+ PipeExtractorRuntimeConfiguration pipeExtractorRuntimeConfiguration)
+ throws Exception {
+ throw new UnsupportedOperationException();
+ }
+
public final void setCollector(final EventCollector collector) {
this.collector = collector;
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/api/customizer/CollectorRuntimeEnvironment.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/api/customizer/CollectorRuntimeEnvironment.java
index 8ace81c..07c60a2 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/api/customizer/CollectorRuntimeEnvironment.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/api/customizer/CollectorRuntimeEnvironment.java
@@ -49,6 +49,11 @@ public long getCreationTime() {
return creationTime;
}
+ @Override
+ public int getRegionId() {
+ return getInstanceIndex();
+ }
+
public int getParallelism() {
return parallelism;
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/api/event/PeriodicalEvent.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/api/event/PeriodicalEvent.java
new file mode 100644
index 0000000..6d85c9c
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/api/event/PeriodicalEvent.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.api.event;
+
+import org.apache.iotdb.pipe.api.event.Event;
+
+public class PeriodicalEvent implements Event {}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/BuiltinPlugin.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/BuiltinPlugin.java
index 203570c..3ae111f 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/BuiltinPlugin.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/BuiltinPlugin.java
@@ -20,9 +20,12 @@
package org.apache.iotdb.collector.plugin.builtin;
import org.apache.iotdb.collector.plugin.builtin.processor.DoNothingProcessor;
+import org.apache.iotdb.collector.plugin.builtin.processor.SubscriptionProcessor;
import org.apache.iotdb.collector.plugin.builtin.sink.DemoSink;
+import org.apache.iotdb.collector.plugin.builtin.sink.protocol.IoTDBDataRegionSyncConnector;
import org.apache.iotdb.collector.plugin.builtin.source.HttpPullSource;
import org.apache.iotdb.collector.plugin.builtin.source.HttpPushSource;
+import org.apache.iotdb.collector.plugin.builtin.source.IoTDBPushSource;
public enum BuiltinPlugin {
@@ -31,12 +34,15 @@ public enum BuiltinPlugin {
// Pull Sources
HTTP_PULL_SOURCE("http-pull-source", HttpPullSource.class),
+ SUBSCRIPTION_SOURCE("subscription-source", IoTDBPushSource.class),
// Processors
DO_NOTHING_PROCESSOR("do-nothing-processor", DoNothingProcessor.class),
+ SUBSCRIPTION_PROCESSOR("subscription-processor", SubscriptionProcessor.class),
// Sinks
- IOTDB_THRIFT_SINK("iotdb-thrift-sink", DemoSink.class);
+ IOTDB_DEMO_SINK("iotdb-demo-sink", DemoSink.class),
+ IOTDB_SYNC_SINK("iotdb-sync-sink", IoTDBDataRegionSyncConnector.class);
private final String collectorPluginName;
private final Class> collectorPluginClass;
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/processor/SubscriptionProcessor.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/processor/SubscriptionProcessor.java
new file mode 100644
index 0000000..fdf2c9e
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/processor/SubscriptionProcessor.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.processor;
+
+import org.apache.iotdb.collector.plugin.builtin.source.event.SubDemoEvent;
+import org.apache.iotdb.pipe.api.PipeProcessor;
+import org.apache.iotdb.pipe.api.collector.EventCollector;
+import org.apache.iotdb.pipe.api.customizer.configuration.PipeProcessorRuntimeConfiguration;
+import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator;
+import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
+import org.apache.iotdb.pipe.api.event.Event;
+import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
+
+public class SubscriptionProcessor implements PipeProcessor { // pass-through processor for subscription pipes
+  @Override
+  public void validate(PipeParameterValidator pipeParameterValidator) throws Exception {} // no parameters to validate
+
+  @Override
+  public void customize(
+      PipeParameters pipeParameters,
+      PipeProcessorRuntimeConfiguration pipeProcessorRuntimeConfiguration)
+      throws Exception {} // no runtime configuration needed
+
+  @Override
+  public void process(TabletInsertionEvent tabletInsertionEvent, EventCollector eventCollector)
+      throws Exception {
+    eventCollector.collect(tabletInsertionEvent); // forward tablet events unchanged
+  }
+
+  @Override
+  public void process(Event event, EventCollector eventCollector) throws Exception { // non-SubDemoEvent events are dropped
+    if (event instanceof SubDemoEvent) {
+      process((SubDemoEvent) event, eventCollector); // NOTE(review): assumes SubDemoEvent is a TabletInsertionEvent so this dispatches to the overload above; otherwise it recurses — confirm
+    }
+  }
+
+  @Override
+  public void close() throws Exception {} // no resources held
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBClientManager.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBClientManager.java
new file mode 100644
index 0000000..521debe
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBClientManager.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.client;
+
+import org.apache.iotdb.collector.config.PipeRuntimeOptions;
+import org.apache.iotdb.common.rpc.thrift.TEndPoint;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.SocketTimeoutException;
+import java.util.List;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+public abstract class IoTDBClientManager { // shared state/handshake config for IoTDB sink client managers
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBClientManager.class);
+
+  protected final List endPointList;
+  protected long currentClientIndex = 0; // round-robin cursor over endPointList
+
+  protected final boolean useLeaderCache;
+
+  protected final String username;
+  protected final String password;
+
+  protected final boolean validateTsFile;
+
+  protected final boolean shouldReceiverConvertOnTypeMismatch;
+  protected final String loadTsFileStrategy;
+
+  protected final boolean shouldMarkAsPipeRequest;
+
+  // This flag indicates whether the receiver supports mods transferring if
+  // it is a DataNode receiver. The flag is useless for configNode receiver.
+  protected boolean supportModsIfIsDataNodeReceiver = true;
+
+  private static final int MAX_CONNECTION_TIMEOUT_MS = 24 * 60 * 60 * 1000; // 1 day
+  private static final int FIRST_ADJUSTMENT_TIMEOUT_MS = 6 * 60 * 60 * 1000; // 6 hours
+  protected static final AtomicInteger CONNECTION_TIMEOUT_MS = // shared by all managers; only grows (see adjustTimeoutIfNecessary)
+      new AtomicInteger(PipeRuntimeOptions.PIPE_CONNECTOR_TRANSFER_TIMEOUT_MS.value());
+
+  protected IoTDBClientManager(
+      final List endPointList,
+      /* The following parameters are used locally. */
+      final boolean useLeaderCache,
+      /* The following parameters are used to handshake with the receiver. */
+      final String username,
+      final String password,
+      final boolean shouldReceiverConvertOnTypeMismatch,
+      final String loadTsFileStrategy,
+      final boolean validateTsFile,
+      final boolean shouldMarkAsPipeRequest) {
+    this.endPointList = endPointList;
+
+    this.useLeaderCache = useLeaderCache;
+
+    this.username = username;
+    this.password = password;
+    this.shouldReceiverConvertOnTypeMismatch = shouldReceiverConvertOnTypeMismatch;
+    this.loadTsFileStrategy = loadTsFileStrategy;
+    this.validateTsFile = validateTsFile;
+    this.shouldMarkAsPipeRequest = shouldMarkAsPipeRequest;
+  }
+
+  public boolean supportModsIfIsDataNodeReceiver() {
+    return supportModsIfIsDataNodeReceiver;
+  }
+
+  public void adjustTimeoutIfNecessary(Throwable e) { // doubles the shared timeout (capped at 1 day) when a timeout is found in the cause chain
+    do { // walk e and its causes
+      if (e instanceof SocketTimeoutException || e instanceof TimeoutException) {
+        int newConnectionTimeout;
+        try {
+          newConnectionTimeout =
+              Math.min(
+                  Math.max(
+                      FIRST_ADJUSTMENT_TIMEOUT_MS,
+                      Math.toIntExact(CONNECTION_TIMEOUT_MS.get() * 2L)), // toIntExact throws on int overflow
+                  MAX_CONNECTION_TIMEOUT_MS);
+        } catch (ArithmeticException arithmeticException) {
+          newConnectionTimeout = MAX_CONNECTION_TIMEOUT_MS; // overflow: clamp to the cap
+        }
+
+        if (newConnectionTimeout != CONNECTION_TIMEOUT_MS.get()) { // NOTE(review): check-then-set is racy across threads; benign since value only grows — confirm
+          CONNECTION_TIMEOUT_MS.set(newConnectionTimeout);
+          LOGGER.info(
+              "Pipe connection timeout is adjusted to {} ms ({} mins)",
+              newConnectionTimeout,
+              newConnectionTimeout / 60000.0);
+        }
+        return; // adjust at most once per call
+      }
+    } while ((e = e.getCause()) != null);
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBDataNodeCacheLeaderClientManager.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBDataNodeCacheLeaderClientManager.java
new file mode 100644
index 0000000..dff5f2d
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBDataNodeCacheLeaderClientManager.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.client;
+
+import org.apache.iotdb.common.rpc.thrift.TEndPoint;
+
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import com.github.benmanes.caffeine.cache.Weigher;
+import com.google.common.util.concurrent.AtomicDouble;
+
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Mixin interface providing a process-wide cache that maps device ids to the {@link TEndPoint}
+ * of their current leader, so senders can route requests directly to the leader replica.
+ */
+public interface IoTDBDataNodeCacheLeaderClientManager {
+
+  /** Shared, process-wide leader cache instance. */
+  LeaderCacheManager LEADER_CACHE_MANAGER = new LeaderCacheManager();
+
+  /** Weighted-LRU cache of device id to leader endpoint. */
+  class LeaderCacheManager {
+
+    // Caffeine refuses to build a cache that configures a weigher without a maximum
+    // weight (build() throws IllegalStateException), so a cap is mandatory here.
+    // NOTE(review): 64 MiB is a placeholder cap — confirm against deployment sizing.
+    private static final long MAXIMUM_WEIGHT_IN_BYTES = 64L * 1024 * 1024;
+
+    // Scales each entry's weight; values > 1 make entries look heavier (evict sooner).
+    private final AtomicDouble memoryUsageCheatFactor = new AtomicDouble(1);
+
+    // leader cache built by LRU: device id -> leader endpoint
+    private final Cache<String, TEndPoint> device2endpoint;
+    // interning map so equal endpoints share a single canonical instance
+    private final ConcurrentHashMap<TEndPoint, TEndPoint> endPoints = new ConcurrentHashMap<>();
+
+    public LeaderCacheManager() {
+      device2endpoint =
+          Caffeine.newBuilder()
+              .maximumWeight(MAXIMUM_WEIGHT_IN_BYTES)
+              .weigher(
+                  (Weigher<String, TEndPoint>)
+                      (device, endPoint) -> {
+                        // Weight is the device id's byte length scaled by the cheat factor;
+                        // non-positive or int-overflowing results clamp to Integer.MAX_VALUE.
+                        final long weightInLong =
+                            (long) (device.getBytes().length * memoryUsageCheatFactor.get());
+                        if (weightInLong <= 0) {
+                          return Integer.MAX_VALUE;
+                        }
+                        final int weightInInt = (int) weightInLong;
+                        return weightInInt != weightInLong ? Integer.MAX_VALUE : weightInInt;
+                      })
+              .recordStats()
+              .build();
+    }
+
+    /**
+     * @param deviceId device to look up; may be null
+     * @return the cached leader endpoint for the device, or null if unknown
+     */
+    public TEndPoint getLeaderEndPoint(final String deviceId) {
+      return deviceId == null ? null : device2endpoint.getIfPresent(deviceId);
+    }
+
+    /** Records (or refreshes) the leader endpoint of a device; null arguments are ignored. */
+    public void updateLeaderEndPoint(final String deviceId, final TEndPoint endPoint) {
+      if (deviceId == null || endPoint == null) {
+        return;
+      }
+
+      // Intern the endpoint so repeated updates reuse one canonical instance in the cache.
+      device2endpoint.put(deviceId, endPoints.computeIfAbsent(endPoint, k -> k));
+    }
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBDataNodeSyncClientManager.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBDataNodeSyncClientManager.java
new file mode 100644
index 0000000..8992f8e
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBDataNodeSyncClientManager.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.client;
+
+import org.apache.iotdb.collector.config.PipeRuntimeOptions;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request.PipeTransferDataNodeHandshakeV1Req;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request.PipeTransferDataNodeHandshakeV2Req;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request.PipeTransferHandshakeV2Req;
+import org.apache.iotdb.common.rpc.thrift.TEndPoint;
+
+import org.apache.tsfile.utils.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+public class IoTDBDataNodeSyncClientManager extends IoTDBSyncClientManager
+ implements IoTDBDataNodeCacheLeaderClientManager {
+
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(IoTDBDataNodeSyncClientManager.class);
+
+ public IoTDBDataNodeSyncClientManager(
+ final List endPoints,
+ final boolean useSSL,
+ final String trustStorePath,
+ final String trustStorePwd,
+ /* The following parameters are used locally. */
+ final boolean useLeaderCache,
+ final String loadBalanceStrategy,
+ /* The following parameters are used to handshake with the receiver. */
+ final String username,
+ final String password,
+ final boolean shouldReceiverConvertOnTypeMismatch,
+ final String loadTsFileStrategy,
+ final boolean validateTsFile,
+ final boolean shouldMarkAsPipeRequest) {
+ super(
+ endPoints,
+ useSSL,
+ trustStorePath,
+ trustStorePwd,
+ useLeaderCache,
+ loadBalanceStrategy,
+ username,
+ password,
+ shouldReceiverConvertOnTypeMismatch,
+ loadTsFileStrategy,
+ validateTsFile,
+ shouldMarkAsPipeRequest);
+ }
+
+ @Override
+ protected PipeTransferDataNodeHandshakeV1Req buildHandshakeV1Req() throws IOException {
+ return PipeTransferDataNodeHandshakeV1Req.toTPipeTransferReq(
+ PipeRuntimeOptions.TIMESTAMP_PRECISION.value());
+ }
+
+ @Override
+ protected PipeTransferHandshakeV2Req buildHandshakeV2Req(final Map params)
+ throws IOException {
+ return PipeTransferDataNodeHandshakeV2Req.toTPipeTransferReq(params);
+ }
+
+ @Override
+ protected String getClusterId() {
+ return PipeRuntimeOptions.CLUSTER_ID.value();
+ }
+
+ public Pair getClient(final String deviceId) {
+ final TEndPoint endPoint = LEADER_CACHE_MANAGER.getLeaderEndPoint(deviceId);
+ return useLeaderCache
+ && endPoint != null
+ && endPoint2ClientAndStatus.containsKey(endPoint)
+ && Boolean.TRUE.equals(endPoint2ClientAndStatus.get(endPoint).getRight())
+ ? endPoint2ClientAndStatus.get(endPoint)
+ : getClient();
+ }
+
+ public Pair getClient(final TEndPoint endPoint) {
+ return useLeaderCache
+ && endPoint != null
+ && endPoint2ClientAndStatus.containsKey(endPoint)
+ && Boolean.TRUE.equals(endPoint2ClientAndStatus.get(endPoint).getRight())
+ ? endPoint2ClientAndStatus.get(endPoint)
+ : getClient();
+ }
+
+ public void updateLeaderCache(final String deviceId, final TEndPoint endPoint) {
+ if (!useLeaderCache || deviceId == null || endPoint == null) {
+ return;
+ }
+
+ try {
+ if (!endPoint2ClientAndStatus.containsKey(endPoint)) {
+ endPointList.add(endPoint);
+ endPoint2ClientAndStatus.put(endPoint, new Pair<>(null, false));
+ reconstructClient(endPoint);
+ }
+
+ LEADER_CACHE_MANAGER.updateLeaderEndPoint(deviceId, endPoint);
+ } catch (final Exception e) {
+ LOGGER.warn(
+ "Failed to update leader cache for device {} with endpoint {}:{}.",
+ deviceId,
+ endPoint.getIp(),
+ endPoint.getPort(),
+ e);
+ }
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBSyncClient.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBSyncClient.java
new file mode 100644
index 0000000..acb7be3
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBSyncClient.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.client;
+
+import org.apache.iotdb.collector.config.PipeRuntimeOptions;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request.IoTDBConnectorRequestVersion;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request.PipeTransferSliceReq;
+import org.apache.iotdb.common.rpc.thrift.TEndPoint;
+import org.apache.iotdb.pipe.api.exception.PipeConnectionException;
+import org.apache.iotdb.rpc.DeepCopyRpcTransportFactory;
+import org.apache.iotdb.rpc.TSStatusCode;
+import org.apache.iotdb.rpc.TimeoutChangeableTransport;
+import org.apache.iotdb.service.rpc.thrift.IClientRPCService;
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferResp;
+
+import org.apache.thrift.TException;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Synchronous thrift RPC client for pipe transfer. Oversized V1 requests are transparently
+ * split into ordered slices (see {@link #pipeTransfer}); the receiver is presumably expected
+ * to reassemble them — TODO(review): confirm receiver-side slice handling.
+ */
+public class IoTDBSyncClient extends IClientRPCService.Client
+    implements ThriftClient, AutoCloseable {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBSyncClient.class);
+
+  // Process-wide counter so each sliced request gets a distinct order id.
+  private static final AtomicInteger SLICE_ORDER_ID_GENERATOR = new AtomicInteger(0);
+
+  private final String ipAddress;
+  private final int port;
+  // Endpoint derived from ipAddress/port, kept for map keys and error reporting.
+  private final TEndPoint endPoint;
+
+  /**
+   * Creates the client and opens its transport immediately.
+   *
+   * @param property protocol factory and connection timeout settings
+   * @param ipAddress target server ip
+   * @param port target server port
+   * @param useSSL whether to build an SSL transport (then trustStore/trustStorePwd are used)
+   * @param trustStore trust store path for SSL; unused when useSSL is false
+   * @param trustStorePwd trust store password for SSL; unused when useSSL is false
+   * @throws TTransportException if the transport cannot be created or opened
+   */
+  public IoTDBSyncClient(
+      ThriftClientProperty property,
+      String ipAddress,
+      int port,
+      boolean useSSL,
+      String trustStore,
+      String trustStorePwd)
+      throws TTransportException {
+    super(
+        property
+            .getProtocolFactory()
+            .getProtocol(
+                useSSL
+                    ? DeepCopyRpcTransportFactory.INSTANCE.getTransport(
+                        ipAddress,
+                        port,
+                        property.getConnectionTimeoutMs(),
+                        trustStore,
+                        trustStorePwd)
+                    : DeepCopyRpcTransportFactory.INSTANCE.getTransport(
+                        ipAddress, port, property.getConnectionTimeoutMs())));
+    this.ipAddress = ipAddress;
+    this.port = port;
+    this.endPoint = new TEndPoint(ipAddress, port);
+    // Open the underlying transport eagerly so the instance is usable right away.
+    final TTransport transport = getInputProtocol().getTransport();
+    if (!transport.isOpen()) {
+      transport.open();
+    }
+  }
+
+  public String getIpAddress() {
+    return ipAddress;
+  }
+
+  public int getPort() {
+    return port;
+  }
+
+  public TEndPoint getEndPoint() {
+    return endPoint;
+  }
+
+  /** Changes the socket timeout of the underlying transport (milliseconds). */
+  public void setTimeout(int timeout) {
+    ((TimeoutChangeableTransport) (getInputProtocol().getTransport())).setTimeout(timeout);
+  }
+
+  /**
+   * Sends a transfer request, slicing it first when a V1 request body exceeds 80% of the
+   * thrift frame size limit. On any slicing failure, falls back to sending the original
+   * request whole.
+   */
+  @Override
+  public TPipeTransferResp pipeTransfer(final TPipeTransferReq req) throws TException {
+    // 80% of the frame limit leaves headroom for the request envelope around the body.
+    final int bodySizeLimit = (int) (PipeRuntimeOptions.THRIFT_FRAME_MAX_SIZE.value() * 0.8);
+    if (req.getVersion() != IoTDBConnectorRequestVersion.VERSION_1.getVersion()
+        || req.body.limit() < bodySizeLimit) {
+      return super.pipeTransfer(req);
+    }
+
+    LOGGER.warn(
+        "The body size of the request is too large. The request will be sliced. Origin req: {}-{}. "
+            + "Request body size: {}, threshold: {}",
+        req.getVersion(),
+        req.getType(),
+        req.body.limit(),
+        bodySizeLimit);
+
+    try {
+      final int sliceOrderId = SLICE_ORDER_ID_GENERATOR.getAndIncrement();
+      // Slice the buffer to avoid the buffer being too large
+      final int sliceCount =
+          req.body.limit() / bodySizeLimit + (req.body.limit() % bodySizeLimit == 0 ? 0 : 1);
+      for (int i = 0; i < sliceCount; ++i) {
+        final int startIndexInBody = i * bodySizeLimit;
+        final int endIndexInBody = Math.min((i + 1) * bodySizeLimit, req.body.limit());
+        final TPipeTransferResp sliceResp =
+            super.pipeTransfer(
+                PipeTransferSliceReq.toTPipeTransferReq(
+                    sliceOrderId,
+                    req.getType(),
+                    i,
+                    sliceCount,
+                    req.body.duplicate(),
+                    startIndexInBody,
+                    endIndexInBody));
+
+        // The last slice's response stands in for the whole request's response.
+        if (i == sliceCount - 1) {
+          return sliceResp;
+        }
+
+        // Any intermediate slice failure aborts slicing (caught below, whole-request retry).
+        if (sliceResp.getStatus().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+          throw new PipeConnectionException(
+              String.format(
+                  "Failed to transfer slice. Origin req: %s-%s, slice index: %d, slice count: %d. Reason: %s",
+                  req.getVersion(), req.getType(), i, sliceCount, sliceResp.getStatus()));
+        }
+      }
+
+      // Should not reach here
+      return super.pipeTransfer(req);
+    } catch (final Exception e) {
+      LOGGER.warn(
+          "Failed to transfer slice. Origin req: {}-{}. Retry the whole transfer.",
+          req.getVersion(),
+          req.getType(),
+          e);
+      // Fall back to the original behavior
+      return super.pipeTransfer(req);
+    }
+  }
+
+  /** Closing simply invalidates (closes) the underlying transport. */
+  @Override
+  public void close() throws Exception {
+    invalidate();
+  }
+
+  /** Closes the underlying transport if it is currently open. */
+  @Override
+  public void invalidate() {
+    if (getInputProtocol().getTransport().isOpen()) {
+      getInputProtocol().getTransport().close();
+    }
+  }
+
+  /** This client owns a single connection, so invalidate-all equals invalidate. */
+  @Override
+  public void invalidateAll() {
+    invalidate();
+  }
+
+  @Override
+  public boolean printLogWhenEncounterException() {
+    return true;
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBSyncClientManager.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBSyncClientManager.java
new file mode 100644
index 0000000..fadb2d2
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/IoTDBSyncClientManager.java
@@ -0,0 +1,373 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.client;
+
+import org.apache.iotdb.collector.config.PipeRuntimeOptions;
+import org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeTransferHandshakeConstant;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request.PipeTransferHandshakeV1Req;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request.PipeTransferHandshakeV2Req;
+import org.apache.iotdb.common.rpc.thrift.TEndPoint;
+import org.apache.iotdb.pipe.api.exception.PipeConnectionException;
+import org.apache.iotdb.rpc.TSStatusCode;
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferResp;
+
+import org.apache.tsfile.utils.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_LOAD_BALANCE_PRIORITY_STRATEGY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_LOAD_BALANCE_RANDOM_STRATEGY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_LOAD_BALANCE_ROUND_ROBIN_STRATEGY;
+
+public abstract class IoTDBSyncClientManager extends IoTDBClientManager implements Closeable {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBSyncClientManager.class);
+
+ private final boolean useSSL;
+ private final String trustStorePath;
+ private final String trustStorePwd;
+
+ protected final Map> endPoint2ClientAndStatus =
+ new ConcurrentHashMap<>();
+ private final Map endPoint2HandshakeErrorMessage = new ConcurrentHashMap<>();
+
+ private final LoadBalancer loadBalancer;
+
+ protected IoTDBSyncClientManager(
+ List endPoints,
+ boolean useSSL,
+ String trustStorePath,
+ String trustStorePwd,
+ /* The following parameters are used locally. */
+ boolean useLeaderCache,
+ String loadBalanceStrategy,
+ /* The following parameters are used to handshake with the receiver. */
+ String username,
+ String password,
+ boolean shouldReceiverConvertOnTypeMismatch,
+ String loadTsFileStrategy,
+ boolean validateTsFile,
+ boolean shouldMarkAsPipeRequest) {
+ super(
+ endPoints,
+ useLeaderCache,
+ username,
+ password,
+ shouldReceiverConvertOnTypeMismatch,
+ loadTsFileStrategy,
+ validateTsFile,
+ shouldMarkAsPipeRequest);
+
+ this.useSSL = useSSL;
+ this.trustStorePath = trustStorePath;
+ this.trustStorePwd = trustStorePwd;
+
+ for (final TEndPoint endPoint : endPoints) {
+ endPoint2ClientAndStatus.put(endPoint, new Pair<>(null, false));
+ }
+
+ switch (loadBalanceStrategy) {
+ case CONNECTOR_LOAD_BALANCE_ROUND_ROBIN_STRATEGY:
+ loadBalancer = new RoundRobinLoadBalancer();
+ break;
+ case CONNECTOR_LOAD_BALANCE_RANDOM_STRATEGY:
+ loadBalancer = new RandomLoadBalancer();
+ break;
+ case CONNECTOR_LOAD_BALANCE_PRIORITY_STRATEGY:
+ loadBalancer = new PriorityLoadBalancer();
+ break;
+ default:
+ LOGGER.warn(
+ "Unknown load balance strategy: {}, use round-robin strategy instead.",
+ loadBalanceStrategy);
+ loadBalancer = new RoundRobinLoadBalancer();
+ }
+ }
+
+ public void checkClientStatusAndTryReconstructIfNecessary() {
+ // Reconstruct all dead clients
+ for (final Map.Entry> entry :
+ endPoint2ClientAndStatus.entrySet()) {
+ if (Boolean.TRUE.equals(entry.getValue().getRight())) {
+ continue;
+ }
+
+ reconstructClient(entry.getKey());
+ }
+
+ // Check whether any clients are available
+ for (final Pair clientAndStatus : endPoint2ClientAndStatus.values()) {
+ if (Boolean.TRUE.equals(clientAndStatus.getRight())) {
+ return;
+ }
+ }
+
+ // If all clients are not available, throw an exception
+ final StringBuilder errorMessage =
+ new StringBuilder(
+ String.format(
+ "All target servers %s are not available.", endPoint2ClientAndStatus.keySet()));
+ for (final Map.Entry entry : endPoint2HandshakeErrorMessage.entrySet()) {
+ errorMessage
+ .append(" (")
+ .append("host: ")
+ .append(entry.getKey().getIp())
+ .append(", port: ")
+ .append(entry.getKey().getPort())
+ .append(", because: ")
+ .append(entry.getValue())
+ .append(")");
+ }
+ throw new PipeConnectionException(errorMessage.toString());
+ }
+
+ protected void reconstructClient(TEndPoint endPoint) {
+ endPoint2HandshakeErrorMessage.remove(endPoint);
+
+ final Pair clientAndStatus = endPoint2ClientAndStatus.get(endPoint);
+
+ if (clientAndStatus.getLeft() != null) {
+ try {
+ clientAndStatus.getLeft().close();
+ } catch (Exception e) {
+ LOGGER.warn(
+ "Failed to close client with target server ip: {}, port: {}, because: {}. Ignore it.",
+ endPoint.getIp(),
+ endPoint.getPort(),
+ e.getMessage());
+ }
+ }
+
+ // It is necessary to ensure that the client is initialized successfully and not null. If false
+ // is returned, it means that the initialization is not successful and the handshake operation
+ // is not performed.
+ if (initClientAndStatus(clientAndStatus, endPoint)) {
+ sendHandshakeReq(clientAndStatus);
+ }
+ }
+
+ private boolean initClientAndStatus(
+ final Pair clientAndStatus, final TEndPoint endPoint) {
+ try {
+ clientAndStatus.setLeft(
+ new IoTDBSyncClient(
+ new ThriftClientProperty.Builder()
+ .setConnectionTimeoutMs(
+ PipeRuntimeOptions.PIPE_CONNECTOR_TRANSFER_TIMEOUT_MS.value())
+ .setRpcThriftCompressionEnabled(
+ PipeRuntimeOptions.IS_PIPE_CONNECTOR_RPC_THRIFT_COMPRESSION_ENABLED.value())
+ .build(),
+ endPoint.getIp(),
+ endPoint.getPort(),
+ useSSL,
+ trustStorePath,
+ trustStorePwd));
+ return true;
+ } catch (Exception e) {
+ endPoint2HandshakeErrorMessage.put(endPoint, e.getMessage());
+ LOGGER.warn(
+ "Failed to initialize client with target server ip: {}, port: {}, because {}",
+ endPoint.getIp(),
+ endPoint.getPort(),
+ e.getMessage(),
+ e);
+ return false;
+ }
+ }
+
+ public void sendHandshakeReq(final Pair clientAndStatus) {
+ final IoTDBSyncClient client = clientAndStatus.getLeft();
+ try {
+ final HashMap params = new HashMap<>();
+ params.put(
+ PipeTransferHandshakeConstant.HANDSHAKE_KEY_TIME_PRECISION,
+ PipeRuntimeOptions.TIMESTAMP_PRECISION.value());
+ params.put(PipeTransferHandshakeConstant.HANDSHAKE_KEY_CLUSTER_ID, getClusterId());
+ params.put(
+ PipeTransferHandshakeConstant.HANDSHAKE_KEY_CONVERT_ON_TYPE_MISMATCH,
+ Boolean.toString(shouldReceiverConvertOnTypeMismatch));
+ params.put(
+ PipeTransferHandshakeConstant.HANDSHAKE_KEY_LOAD_TSFILE_STRATEGY, loadTsFileStrategy);
+ params.put(PipeTransferHandshakeConstant.HANDSHAKE_KEY_USERNAME, username);
+ params.put(PipeTransferHandshakeConstant.HANDSHAKE_KEY_PASSWORD, password);
+ params.put(
+ PipeTransferHandshakeConstant.HANDSHAKE_KEY_VALIDATE_TSFILE,
+ Boolean.toString(validateTsFile));
+ params.put(
+ PipeTransferHandshakeConstant.HANDSHAKE_KEY_MARK_AS_PIPE_REQUEST,
+ Boolean.toString(shouldMarkAsPipeRequest));
+
+ // Try to handshake by PipeTransferHandshakeV2Req.
+ TPipeTransferResp resp = client.pipeTransfer(buildHandshakeV2Req(params));
+ // Receiver may be an old version, so we need to retry to handshake by
+ // PipeTransferHandshakeV1Req.
+ if (resp.getStatus().getCode() == TSStatusCode.PIPE_TYPE_ERROR.getStatusCode()) {
+ LOGGER.warn(
+ "Handshake error with target server ip: {}, port: {}, because: {}. "
+ + "Retry to handshake by PipeTransferHandshakeV1Req.",
+ client.getIpAddress(),
+ client.getPort(),
+ resp.getStatus());
+ supportModsIfIsDataNodeReceiver = false;
+ resp = client.pipeTransfer(buildHandshakeV1Req());
+ }
+
+ if (resp.getStatus().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+ LOGGER.warn(
+ "Handshake error with target server ip: {}, port: {}, because: {}.",
+ client.getIpAddress(),
+ client.getPort(),
+ resp.getStatus());
+ endPoint2HandshakeErrorMessage.put(client.getEndPoint(), resp.getStatus().getMessage());
+ } else {
+ clientAndStatus.setRight(true);
+ client.setTimeout(CONNECTION_TIMEOUT_MS.get());
+ LOGGER.info(
+ "Handshake success. Target server ip: {}, port: {}",
+ client.getIpAddress(),
+ client.getPort());
+ }
+ } catch (Exception e) {
+ LOGGER.warn(
+ "Handshake error with target server ip: {}, port: {}, because: {}.",
+ client.getIpAddress(),
+ client.getPort(),
+ e.getMessage(),
+ e);
+ endPoint2HandshakeErrorMessage.put(client.getEndPoint(), e.getMessage());
+ }
+ }
+
+ protected abstract PipeTransferHandshakeV1Req buildHandshakeV1Req() throws IOException;
+
+ protected abstract PipeTransferHandshakeV2Req buildHandshakeV2Req(Map params)
+ throws IOException;
+
+ protected abstract String getClusterId();
+
+ public Pair getClient() {
+ return loadBalancer.getClient();
+ }
+
+ @Override
+ public void close() {
+ for (final Map.Entry> entry :
+ endPoint2ClientAndStatus.entrySet()) {
+ final TEndPoint endPoint = entry.getKey();
+ final Pair clientAndStatus = entry.getValue();
+
+ if (clientAndStatus == null) {
+ continue;
+ }
+
+ try {
+ if (clientAndStatus.getLeft() != null) {
+ clientAndStatus.getLeft().close();
+ clientAndStatus.setLeft(null);
+ }
+ LOGGER.info("Client {}:{} closed.", endPoint.getIp(), endPoint.getPort());
+ } catch (Exception e) {
+ LOGGER.warn(
+ "Failed to close client {}:{}, because: {}.",
+ endPoint.getIp(),
+ endPoint.getPort(),
+ e.getMessage(),
+ e);
+ } finally {
+ clientAndStatus.setRight(false);
+ }
+ }
+ }
+
+ /////////////////////// Strategies for load balance //////////////////////////
+
+ private interface LoadBalancer {
+ Pair getClient();
+ }
+
+ private class RoundRobinLoadBalancer implements LoadBalancer {
+ @Override
+ public Pair getClient() {
+ final int clientSize = endPointList.size();
+ // Round-robin, find the next alive client
+ for (int tryCount = 0; tryCount < clientSize; ++tryCount) {
+ final int clientIndex = (int) (currentClientIndex++ % clientSize);
+ final Pair clientAndStatus =
+ endPoint2ClientAndStatus.get(endPointList.get(clientIndex));
+ if (Boolean.TRUE.equals(clientAndStatus.getRight())) {
+ return clientAndStatus;
+ }
+ }
+
+ throw new PipeConnectionException(
+ "All clients are dead, please check the connection to the receiver.");
+ }
+ }
+
+ private class RandomLoadBalancer implements LoadBalancer {
+ @Override
+ public Pair getClient() {
+ final int clientSize = endPointList.size();
+ final int clientIndex = (int) (Math.random() * clientSize);
+ final Pair clientAndStatus =
+ endPoint2ClientAndStatus.get(endPointList.get(clientIndex));
+ if (Boolean.TRUE.equals(clientAndStatus.getRight())) {
+ return clientAndStatus;
+ }
+
+ // Random, find the next alive client
+ for (int tryCount = 0; tryCount < clientSize - 1; ++tryCount) {
+ final int nextClientIndex = (clientIndex + tryCount + 1) % clientSize;
+ final Pair nextClientAndStatus =
+ endPoint2ClientAndStatus.get(endPointList.get(nextClientIndex));
+ if (Boolean.TRUE.equals(nextClientAndStatus.getRight())) {
+ return nextClientAndStatus;
+ }
+ }
+
+ throw new PipeConnectionException(
+ "All clients are dead, please check the connection to the receiver.");
+ }
+ }
+
+ private class PriorityLoadBalancer implements LoadBalancer {
+ @Override
+ public Pair getClient() {
+ // Priority, find the first alive client
+ for (final TEndPoint endPoint : endPointList) {
+ final Pair clientAndStatus =
+ endPoint2ClientAndStatus.get(endPoint);
+ if (Boolean.TRUE.equals(clientAndStatus.getRight())) {
+ return clientAndStatus;
+ }
+ }
+
+ throw new PipeConnectionException(
+ "All clients are dead, please check the connection to the receiver.");
+ }
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/ThriftClient.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/ThriftClient.java
new file mode 100644
index 0000000..20061ef
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/ThriftClient.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.client;
+
+import org.apache.commons.lang3.exception.ExceptionUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.transport.TTransportException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.net.ConnectException;
+import java.net.SocketException;
+import java.util.Optional;
+
+/**
+ * This class defines the failed interfaces that thrift client needs to support so that the Thrift
+ * Client can clean up the clientManager when it receives the corresponding exception.
+ */
+/**
+ * Failure-handling contract for thrift clients: implementations expose how to drop a single
+ * connection or all pooled connections for their endpoint, and the static helpers decide
+ * which of those to invoke for a given exception.
+ */
+public interface ThriftClient {
+
+  Logger logger = LoggerFactory.getLogger(ThriftClient.class);
+
+  /** Close this connection. */
+  void invalidate();
+
+  /** Removing all pooled instances corresponding to current instance's endpoint. */
+  void invalidateAll();
+
+  /**
+   * Whether to print logs when exceptions are encountered.
+   *
+   * @return result
+   */
+  boolean printLogWhenEncounterException();
+
+  /**
+   * Perform corresponding operations on ThriftClient o based on the Throwable t.
+   *
+   * @param t Throwable
+   * @param o ThriftClient
+   */
+  static void resolveException(Throwable t, ThriftClient o) {
+    // Reflection wraps the real failure; unwrap it first.
+    final Throwable origin =
+        t instanceof InvocationTargetException
+            ? ((InvocationTargetException) t).getTargetException()
+            : t;
+
+    // Any thrift-level failure invalidates this single connection; dump the cause chain
+    // at debug level for diagnosis.
+    if (origin instanceof TException) {
+      int level = 0;
+      for (Throwable walker = origin; walker != null; walker = walker.getCause(), level++) {
+        logger.debug(
+            "level-{} Exception class {}, message {}",
+            level,
+            walker.getClass().getName(),
+            walker.getMessage());
+      }
+      o.invalidate();
+    }
+
+    final Throwable rootCause = ExceptionUtils.getRootCause(origin);
+    if (rootCause == null) {
+      return;
+    }
+
+    // if the exception is SocketException and its error message is Broken pipe, it means that
+    // the remote node may restart and all the connection we cached before should be cleared.
+    logger.debug(
+        "root cause message {}, LocalizedMessage {}, ",
+        rootCause.getMessage(),
+        rootCause.getLocalizedMessage(),
+        rootCause);
+    if (isConnectionBroken(rootCause)) {
+      if (o.printLogWhenEncounterException()) {
+        logger.info(
+            "Broken pipe error happened in sending RPC,"
+                + " we need to clear all previous cached connection, error msg is {}",
+            rootCause.toString());
+      }
+      o.invalidateAll();
+    }
+  }
+
+  /**
+   * Determine whether the target node has gone offline once based on the cause.
+   *
+   * <p>Kept as a single disjunction: ConnectException extends SocketException extends
+   * IOException, so an early-return chain per type would change which messages match.
+   *
+   * @param cause Throwable
+   * @return true/false
+   */
+  static boolean isConnectionBroken(Throwable cause) {
+    return (cause instanceof SocketException && cause.getMessage().contains("Broken pipe"))
+        || (cause instanceof TTransportException
+            && (hasExpectedMessage(cause, "Socket is closed by peer")
+                || hasExpectedMessage(cause, "Read call frame size failed")))
+        || (cause instanceof IOException
+            && (hasExpectedMessage(cause, "Connection reset by peer")
+                || hasExpectedMessage(cause, "Broken pipe")))
+        || (cause instanceof ConnectException && hasExpectedMessage(cause, "Connection refused"));
+  }
+
+  /** Null-safe check that the throwable's message contains the expected fragment. */
+  static boolean hasExpectedMessage(Throwable cause, String expectedMessage) {
+    final String message = cause.getMessage();
+    return message != null && message.contains(expectedMessage);
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/ThriftClientProperty.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/ThriftClientProperty.java
new file mode 100644
index 0000000..b85c273
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/client/ThriftClientProperty.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.client;
+
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.protocol.TProtocolFactory;
+
+import java.util.concurrent.TimeUnit;
+
+/** This class defines the configurations commonly used by the Thrift Client. */
+/**
+ * Immutable holder for the configuration values commonly used by Thrift clients.
+ *
+ * <p>Instances are created through {@link Builder}; the default values live in
+ * {@link DefaultProperty}.
+ */
+public class ThriftClientProperty {
+
+  private final TProtocolFactory protocolFactory;
+  private final int connectionTimeoutMs;
+  private final int selectorNumOfAsyncClientPool;
+  private final boolean printLogWhenEncounterException;
+
+  private ThriftClientProperty(
+      final TProtocolFactory protocolFactory,
+      final int connectionTimeoutMs,
+      final int selectorNumOfAsyncClientPool,
+      final boolean printLogWhenEncounterException) {
+    this.protocolFactory = protocolFactory;
+    this.connectionTimeoutMs = connectionTimeoutMs;
+    this.selectorNumOfAsyncClientPool = selectorNumOfAsyncClientPool;
+    this.printLogWhenEncounterException = printLogWhenEncounterException;
+  }
+
+  /** Returns the Thrift protocol factory (compact when compression is enabled, binary otherwise). */
+  public TProtocolFactory getProtocolFactory() {
+    return protocolFactory;
+  }
+
+  /** Returns the socket timeout for the Thrift client, in milliseconds. */
+  public int getConnectionTimeoutMs() {
+    return connectionTimeoutMs;
+  }
+
+  /** Returns the number of selector threads used by an asynchronous client manager. */
+  public int getSelectorNumOfAsyncClientPool() {
+    return selectorNumOfAsyncClientPool;
+  }
+
+  /** Returns whether exceptions encountered by the client should be logged. */
+  public boolean isPrintLogWhenEncounterException() {
+    return printLogWhenEncounterException;
+  }
+
+  /** Fluent builder for {@link ThriftClientProperty}; all fields start at their defaults. */
+  public static class Builder {
+
+    // Whether to use Thrift compression (compact protocol instead of binary).
+    private boolean rpcThriftCompressionEnabled = DefaultProperty.RPC_THRIFT_COMPRESSED_ENABLED;
+
+    // Socket timeout for the Thrift client, in milliseconds.
+    private int connectionTimeoutMs = DefaultProperty.CONNECTION_TIMEOUT_MS;
+
+    // Number of selector threads for the asynchronous Thrift client in a clientManager.
+    private int selectorNumOfAsyncClientManager =
+        DefaultProperty.SELECTOR_NUM_OF_ASYNC_CLIENT_MANAGER;
+
+    // Whether to print logs when the client encounters exceptions; e.g. heartbeat clients
+    // typically disable this to avoid log spam.
+    private boolean printLogWhenEncounterException =
+        DefaultProperty.PRINT_LOG_WHEN_ENCOUNTER_EXCEPTION;
+
+    public Builder setRpcThriftCompressionEnabled(final boolean rpcThriftCompressionEnabled) {
+      this.rpcThriftCompressionEnabled = rpcThriftCompressionEnabled;
+      return this;
+    }
+
+    public Builder setConnectionTimeoutMs(final int connectionTimeoutMs) {
+      this.connectionTimeoutMs = connectionTimeoutMs;
+      return this;
+    }
+
+    public Builder setSelectorNumOfAsyncClientManager(final int selectorNumOfAsyncClientManager) {
+      this.selectorNumOfAsyncClientManager = selectorNumOfAsyncClientManager;
+      return this;
+    }
+
+    public Builder setPrintLogWhenEncounterException(final boolean printLogWhenEncounterException) {
+      this.printLogWhenEncounterException = printLogWhenEncounterException;
+      return this;
+    }
+
+    /** Builds an immutable {@link ThriftClientProperty} from the current builder state. */
+    public ThriftClientProperty build() {
+      final TProtocolFactory factory =
+          rpcThriftCompressionEnabled
+              ? new TCompactProtocol.Factory()
+              : new TBinaryProtocol.Factory();
+      return new ThriftClientProperty(
+          factory,
+          connectionTimeoutMs,
+          selectorNumOfAsyncClientManager,
+          printLogWhenEncounterException);
+    }
+  }
+
+  /** Default values for every configurable property; not instantiable. */
+  public static class DefaultProperty {
+
+    private DefaultProperty() {}
+
+    public static final boolean RPC_THRIFT_COMPRESSED_ENABLED = false;
+    public static final int CONNECTION_TIMEOUT_MS = (int) TimeUnit.SECONDS.toMillis(20);
+    public static final int CONNECTION_NEVER_TIMEOUT_MS = 0;
+    public static final int SELECTOR_NUM_OF_ASYNC_CLIENT_MANAGER = 1;
+    public static final boolean PRINT_LOG_WHEN_ENCOUNTER_EXCEPTION = true;
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeCompressor.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeCompressor.java
new file mode 100644
index 0000000..dc3f1e7
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeCompressor.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.compressor;
+
+import java.io.IOException;
+
+/**
+ * Base class for the builtin sink's payload compressors.
+ *
+ * <p>Each concrete compressor is bound to one {@link PipeCompressionType}; the type's index byte
+ * is what {@link #serialize()} emits.
+ */
+public abstract class PipeCompressor {
+
+  /** Wire-level identifiers of the supported compression algorithms. */
+  public enum PipeCompressionType {
+    SNAPPY((byte) 0),
+    GZIP((byte) 1),
+    LZ4((byte) 2),
+    ZSTD((byte) 3),
+    LZMA2((byte) 4);
+
+    final byte index;
+
+    PipeCompressionType(final byte index) {
+      this.index = index;
+    }
+
+    public byte getIndex() {
+      return index;
+    }
+  }
+
+  private final PipeCompressionType compressionType;
+
+  protected PipeCompressor(final PipeCompressionType compressionType) {
+    this.compressionType = compressionType;
+  }
+
+  /**
+   * Compresses the given bytes.
+   *
+   * @param data the raw bytes to compress
+   * @return the compressed bytes
+   * @throws IOException if the underlying compressor fails
+   */
+  public abstract byte[] compress(byte[] data) throws IOException;
+
+  /**
+   * Decompresses the byte array without knowing the decompressed length up front. NOTE: some
+   * decompressors (e.g. LZ4) may construct large byte arrays in this mode, leading to potential
+   * OOM.
+   *
+   * @param byteArray the byte array to be decompressed
+   * @return the decompressed byte array
+   * @throws IOException if the underlying decompressor fails
+   */
+  public abstract byte[] decompress(byte[] byteArray) throws IOException;
+
+  /**
+   * Decompresses the byte array when the decompressed length is already known.
+   *
+   * @param byteArray the byte array to be decompressed
+   * @param decompressedLength the length of the decompressed byte array
+   * @return the decompressed byte array
+   * @throws IOException if the underlying decompressor fails
+   */
+  public abstract byte[] decompress(byte[] byteArray, int decompressedLength) throws IOException;
+
+  /** Returns the one-byte wire identifier of this compressor's algorithm. */
+  public byte serialize() {
+    return compressionType.getIndex();
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeCompressorConfig.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeCompressorConfig.java
new file mode 100644
index 0000000..fc54f3a
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeCompressorConfig.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.compressor;
+
+/**
+ * Immutable value object describing which compressor to use and, for ZSTD, at which
+ * compression level. The level is only meaningful when the name selects ZSTD.
+ */
+public class PipeCompressorConfig {
+
+  private final String name;
+  private final int zstdCompressionLevel;
+
+  public PipeCompressorConfig(final String name, final int zstdCompressionLevel) {
+    this.name = name;
+    this.zstdCompressionLevel = zstdCompressionLevel;
+  }
+
+  /** Returns the compressor's option name, e.g. "snappy" or "zstd". */
+  public String getName() {
+    return name;
+  }
+
+  /** Returns the ZSTD compression level; ignored by non-ZSTD compressors. */
+  public int getZstdCompressionLevel() {
+    return zstdCompressionLevel;
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeCompressorFactory.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeCompressorFactory.java
new file mode 100644
index 0000000..e2da42f
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeCompressorFactory.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.compressor;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_GZIP;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_LZ4;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_LZMA2;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_SNAPPY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_ZSTD;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_ZSTD_LEVEL_DEFAULT_VALUE;
+
+/**
+ * Factory and cache for {@link PipeCompressor} instances, addressable either by their option
+ * name (via {@link PipeCompressorConfig}) or by their serialized index byte.
+ */
+public class PipeCompressorFactory {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(PipeCompressorFactory.class);
+
+  // Compressor instances keyed by connector option name. ZSTD entries created on demand are
+  // suffixed with their compression level (e.g. "zstd_7"), so the map must stay concurrent.
+  private static final Map<String, PipeCompressor> COMPRESSOR_NAME_TO_INSTANCE =
+      new ConcurrentHashMap<>();
+
+  static {
+    COMPRESSOR_NAME_TO_INSTANCE.put(CONNECTOR_COMPRESSOR_SNAPPY, new PipeSnappyCompressor());
+    COMPRESSOR_NAME_TO_INSTANCE.put(CONNECTOR_COMPRESSOR_GZIP, new PipeGZIPCompressor());
+    COMPRESSOR_NAME_TO_INSTANCE.put(CONNECTOR_COMPRESSOR_LZ4, new PipeLZ4Compressor());
+    COMPRESSOR_NAME_TO_INSTANCE.put(
+        CONNECTOR_COMPRESSOR_ZSTD,
+        new PipeZSTDCompressor(CONNECTOR_COMPRESSOR_ZSTD_LEVEL_DEFAULT_VALUE));
+    COMPRESSOR_NAME_TO_INSTANCE.put(CONNECTOR_COMPRESSOR_LZMA2, new PipeLZMA2Compressor());
+  }
+
+  /**
+   * Returns the cached compressor for the given config, creating and caching a per-level ZSTD
+   * instance on first use.
+   *
+   * @param config the compressor name and (for ZSTD) compression level; must not be null
+   * @return the matching compressor instance
+   * @throws IllegalArgumentException if the config or its name is null
+   * @throws UnsupportedOperationException if the name matches no known compressor
+   */
+  public static PipeCompressor getCompressor(final PipeCompressorConfig config) {
+    if (config == null) {
+      throw new IllegalArgumentException("PipeCompressorConfig is null");
+    }
+    if (config.getName() == null) {
+      throw new IllegalArgumentException("PipeCompressorConfig.getName() is null");
+    }
+
+    final String compressorName = config.getName();
+
+    // For ZSTD compressor, we need to consider the compression level
+    if (compressorName.equals(CONNECTOR_COMPRESSOR_ZSTD)) {
+      final int zstdCompressionLevel = config.getZstdCompressionLevel();
+      return COMPRESSOR_NAME_TO_INSTANCE.computeIfAbsent(
+          CONNECTOR_COMPRESSOR_ZSTD + "_" + zstdCompressionLevel,
+          key -> {
+            LOGGER.info("Create new PipeZSTDCompressor with level: {}", zstdCompressionLevel);
+            return new PipeZSTDCompressor(zstdCompressionLevel);
+          });
+    }
+
+    // For other compressors, we can directly get the instance by name
+    final PipeCompressor compressor = COMPRESSOR_NAME_TO_INSTANCE.get(compressorName);
+    if (compressor != null) {
+      return compressor;
+    }
+
+    throw new UnsupportedOperationException("PipeCompressor not found for name: " + compressorName);
+  }
+
+  // Compressor instances keyed by their serialized index byte. Built once from the name map and
+  // published as an unmodifiable view; declared final so it can never be reassigned afterwards.
+  private static final Map<Byte, PipeCompressor> COMPRESSOR_INDEX_TO_INSTANCE;
+
+  static {
+    final Map<Byte, PipeCompressor> indexToInstance = new HashMap<>();
+    indexToInstance.put(
+        PipeCompressor.PipeCompressionType.SNAPPY.getIndex(),
+        COMPRESSOR_NAME_TO_INSTANCE.get(CONNECTOR_COMPRESSOR_SNAPPY));
+    indexToInstance.put(
+        PipeCompressor.PipeCompressionType.GZIP.getIndex(),
+        COMPRESSOR_NAME_TO_INSTANCE.get(CONNECTOR_COMPRESSOR_GZIP));
+    indexToInstance.put(
+        PipeCompressor.PipeCompressionType.LZ4.getIndex(),
+        COMPRESSOR_NAME_TO_INSTANCE.get(CONNECTOR_COMPRESSOR_LZ4));
+    indexToInstance.put(
+        PipeCompressor.PipeCompressionType.ZSTD.getIndex(),
+        COMPRESSOR_NAME_TO_INSTANCE.get(CONNECTOR_COMPRESSOR_ZSTD));
+    indexToInstance.put(
+        PipeCompressor.PipeCompressionType.LZMA2.getIndex(),
+        COMPRESSOR_NAME_TO_INSTANCE.get(CONNECTOR_COMPRESSOR_LZMA2));
+    COMPRESSOR_INDEX_TO_INSTANCE = Collections.unmodifiableMap(indexToInstance);
+  }
+
+  /**
+   * Returns the compressor registered for the given serialized index byte.
+   *
+   * @param index the index byte produced by {@link PipeCompressor#serialize()}
+   * @return the matching compressor instance
+   * @throws UnsupportedOperationException if no compressor is registered for the index
+   */
+  public static PipeCompressor getCompressor(final byte index) {
+    final PipeCompressor compressor = COMPRESSOR_INDEX_TO_INSTANCE.get(index);
+    if (compressor == null) {
+      throw new UnsupportedOperationException("PipeCompressor not found for index: " + index);
+    }
+    return compressor;
+  }
+
+  private PipeCompressorFactory() {
+    // Empty constructor
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeGZIPCompressor.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeGZIPCompressor.java
new file mode 100644
index 0000000..7fbfc34
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeGZIPCompressor.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.compressor;
+
+import org.apache.iotdb.tsfile.compress.ICompressor;
+import org.apache.iotdb.tsfile.compress.IUnCompressor;
+import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
+
+import java.io.IOException;
+
+public class PipeGZIPCompressor extends PipeCompressor {
+
+ private static final ICompressor COMPRESSOR = ICompressor.getCompressor(CompressionType.GZIP);
+ private static final IUnCompressor DECOMPRESSOR =
+ IUnCompressor.getUnCompressor(CompressionType.GZIP);
+
+ public PipeGZIPCompressor() {
+ super(PipeCompressionType.GZIP);
+ }
+
+ @Override
+ public byte[] compress(byte[] data) throws IOException {
+ return COMPRESSOR.compress(data);
+ }
+
+ @Override
+ public byte[] decompress(byte[] byteArray) throws IOException {
+ return DECOMPRESSOR.uncompress(byteArray);
+ }
+
+ @Override
+ public byte[] decompress(byte[] byteArray, int decompressedLength) throws IOException {
+ byte[] uncompressed = new byte[decompressedLength];
+ DECOMPRESSOR.uncompress(byteArray, 0, byteArray.length, uncompressed, 0);
+ return uncompressed;
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeLZ4Compressor.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeLZ4Compressor.java
new file mode 100644
index 0000000..725b15e
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeLZ4Compressor.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.compressor;
+
+import org.apache.iotdb.tsfile.compress.ICompressor;
+import org.apache.iotdb.tsfile.compress.IUnCompressor;
+import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
+
+import java.io.IOException;
+
+public class PipeLZ4Compressor extends PipeCompressor {
+
+ private static final ICompressor COMPRESSOR = ICompressor.getCompressor(CompressionType.LZ4);
+ private static final IUnCompressor DECOMPRESSOR =
+ IUnCompressor.getUnCompressor(CompressionType.LZ4);
+
+ public PipeLZ4Compressor() {
+ super(PipeCompressionType.LZ4);
+ }
+
+ @Override
+ public byte[] compress(byte[] data) throws IOException {
+ return COMPRESSOR.compress(data);
+ }
+
+ @Override
+ public byte[] decompress(byte[] byteArray) throws IOException {
+ return DECOMPRESSOR.uncompress(byteArray);
+ }
+
+ @Override
+ public byte[] decompress(byte[] byteArray, int decompressedLength) throws IOException {
+ byte[] uncompressed = new byte[decompressedLength];
+ DECOMPRESSOR.uncompress(byteArray, 0, byteArray.length, uncompressed, 0);
+ return uncompressed;
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeLZMA2Compressor.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeLZMA2Compressor.java
new file mode 100644
index 0000000..2fd459c
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeLZMA2Compressor.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.compressor;
+
+import org.apache.iotdb.tsfile.compress.ICompressor;
+import org.apache.iotdb.tsfile.compress.IUnCompressor;
+import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
+
+import java.io.IOException;
+
+public class PipeLZMA2Compressor extends PipeCompressor {
+
+ private static final ICompressor COMPRESSOR = ICompressor.getCompressor(CompressionType.LZMA2);
+ private static final IUnCompressor DECOMPRESSOR =
+ IUnCompressor.getUnCompressor(CompressionType.LZMA2);
+
+ public PipeLZMA2Compressor() {
+ super(PipeCompressionType.LZMA2);
+ }
+
+ @Override
+ public byte[] compress(byte[] data) throws IOException {
+ return COMPRESSOR.compress(data);
+ }
+
+ @Override
+ public byte[] decompress(byte[] byteArray) throws IOException {
+ return DECOMPRESSOR.uncompress(byteArray);
+ }
+
+ @Override
+ public byte[] decompress(byte[] byteArray, int decompressedLength) throws IOException {
+ byte[] uncompressed = new byte[decompressedLength];
+ DECOMPRESSOR.uncompress(byteArray, 0, byteArray.length, uncompressed, 0);
+ return uncompressed;
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeSnappyCompressor.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeSnappyCompressor.java
new file mode 100644
index 0000000..101b445
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeSnappyCompressor.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.compressor;
+
+import org.apache.iotdb.tsfile.compress.ICompressor;
+import org.apache.iotdb.tsfile.compress.IUnCompressor;
+import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
+
+import java.io.IOException;
+
+public class PipeSnappyCompressor extends PipeCompressor {
+
+ private static final ICompressor COMPRESSOR = ICompressor.getCompressor(CompressionType.SNAPPY);
+ private static final IUnCompressor DECOMPRESSOR =
+ IUnCompressor.getUnCompressor(CompressionType.SNAPPY);
+
+ public PipeSnappyCompressor() {
+ super(PipeCompressionType.SNAPPY);
+ }
+
+ @Override
+ public byte[] compress(byte[] data) throws IOException {
+ return COMPRESSOR.compress(data);
+ }
+
+ @Override
+ public byte[] decompress(byte[] byteArray) throws IOException {
+ return DECOMPRESSOR.uncompress(byteArray);
+ }
+
+ @Override
+ public byte[] decompress(byte[] byteArray, int decompressedLength) throws IOException {
+ byte[] uncompressed = new byte[decompressedLength];
+ DECOMPRESSOR.uncompress(byteArray, 0, byteArray.length, uncompressed, 0);
+ return uncompressed;
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeZSTDCompressor.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeZSTDCompressor.java
new file mode 100644
index 0000000..67e40ac
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/compressor/PipeZSTDCompressor.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.compressor;
+
+import com.github.luben.zstd.Zstd;
+
+import java.io.IOException;
+
+/** ZSTD implementation of {@link PipeCompressor}; the compression level is configurable. */
+public class PipeZSTDCompressor extends PipeCompressor {
+
+  private final int compressionLevel;
+
+  public PipeZSTDCompressor(final int compressionLevel) {
+    super(PipeCompressionType.ZSTD);
+    this.compressionLevel = compressionLevel;
+  }
+
+  @Override
+  public byte[] compress(final byte[] data) throws IOException {
+    return Zstd.compress(data, compressionLevel);
+  }
+
+  @Override
+  public byte[] decompress(final byte[] byteArray) {
+    // NOTE(review): Zstd.decompressedSize relies on the frame recording its content size and may
+    // report a non-positive value otherwise; the int cast mirrors the existing behavior.
+    final int expectedSize = (int) Zstd.decompressedSize(byteArray, 0, byteArray.length);
+    return Zstd.decompress(byteArray, expectedSize);
+  }
+
+  @Override
+  public byte[] decompress(final byte[] byteArray, final int decompressedLength) {
+    return Zstd.decompress(byteArray, decompressedLength);
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/constant/ColumnHeaderConstant.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/constant/ColumnHeaderConstant.java
new file mode 100644
index 0000000..5d4db67
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/constant/ColumnHeaderConstant.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.constant;
+
+/**
+ * Column header names used by the builtin sink.
+ *
+ * <p>NOTE(review): presumably these match the result-set column headers returned by the remote
+ * IoTDB (e.g. from SHOW statements) — confirm against the callers when they appear.
+ */
+public class ColumnHeaderConstant {
+
+  private ColumnHeaderConstant() {
+    // forbidding instantiation
+  }
+
+  // Header of the column holding a database name.
+  public static final String DATABASE = "Database";
+
+  // Header of the column holding a path pattern.
+  public static final String PATH_PATTERN = "PathPattern";
+
+  // Header of the column holding an entity type.
+  public static final String TYPE = "Type";
+
+  // Header of the column holding a table name.
+  public static final String TABLE_NAME = "TableName";
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/constant/PipeConnectorConstant.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/constant/PipeConnectorConstant.java
new file mode 100644
index 0000000..2c65761
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/constant/PipeConnectorConstant.java
@@ -0,0 +1,261 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.constant;
+
+import com.github.luben.zstd.Zstd;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+public class PipeConnectorConstant {
+
+ public static final long MB = 1L << 20;
+
+ public static final String CONNECTOR_KEY = "connector";
+ public static final String SINK_KEY = "sink";
+
+ public static final String CONNECTOR_IOTDB_IP_KEY = "connector.ip";
+ public static final String SINK_IOTDB_IP_KEY = "sink.ip";
+ public static final String CONNECTOR_IOTDB_HOST_KEY = "connector.host";
+ public static final String SINK_IOTDB_HOST_KEY = "sink.host";
+ public static final String CONNECTOR_IOTDB_PORT_KEY = "connector.port";
+ public static final String SINK_IOTDB_PORT_KEY = "sink.port";
+ public static final String CONNECTOR_IOTDB_NODE_URLS_KEY = "connector.node-urls";
+ public static final String SINK_IOTDB_NODE_URLS_KEY = "sink.node-urls";
+
+ public static final String SINK_IOTDB_SSL_ENABLE_KEY = "sink.ssl.enable";
+ public static final String SINK_IOTDB_SSL_TRUST_STORE_PATH_KEY = "sink.ssl.trust-store-path";
+ public static final String SINK_IOTDB_SSL_TRUST_STORE_PWD_KEY = "sink.ssl.trust-store-pwd";
+
+ public static final String CONNECTOR_IOTDB_PARALLEL_TASKS_KEY = "connector.parallel.tasks";
+ public static final String SINK_IOTDB_PARALLEL_TASKS_KEY = "sink.parallel.tasks";
+
+ public static final String CONNECTOR_REALTIME_FIRST_KEY = "connector.realtime-first";
+ public static final String SINK_REALTIME_FIRST_KEY = "sink.realtime-first";
+ public static final boolean CONNECTOR_REALTIME_FIRST_DEFAULT_VALUE = true;
+
+ public static final String CONNECTOR_IOTDB_BATCH_MODE_ENABLE_KEY = "connector.batch.enable";
+ public static final String SINK_IOTDB_BATCH_MODE_ENABLE_KEY = "sink.batch.enable";
+ public static final boolean CONNECTOR_IOTDB_BATCH_MODE_ENABLE_DEFAULT_VALUE = true;
+
+ public static final String CONNECTOR_IOTDB_BATCH_DELAY_KEY = "connector.batch.max-delay-seconds";
+ public static final String SINK_IOTDB_BATCH_DELAY_KEY = "sink.batch.max-delay-seconds";
+ public static final int CONNECTOR_IOTDB_PLAIN_BATCH_DELAY_DEFAULT_VALUE = 1;
+ public static final int CONNECTOR_IOTDB_TS_FILE_BATCH_DELAY_DEFAULT_VALUE = 5;
+
+ public static final String CONNECTOR_IOTDB_BATCH_SIZE_KEY = "connector.batch.size-bytes";
+ public static final String SINK_IOTDB_BATCH_SIZE_KEY = "sink.batch.size-bytes";
+ public static final long CONNECTOR_IOTDB_PLAIN_BATCH_SIZE_DEFAULT_VALUE = 16 * MB;
+ public static final long CONNECTOR_IOTDB_TS_FILE_BATCH_SIZE_DEFAULT_VALUE = 80 * MB;
+
+ public static final String CONNECTOR_IOTDB_USER_KEY = "connector.user";
+ public static final String SINK_IOTDB_USER_KEY = "sink.user";
+ public static final String CONNECTOR_IOTDB_USERNAME_KEY = "connector.username";
+ public static final String SINK_IOTDB_USERNAME_KEY = "sink.username";
+ public static final String CONNECTOR_IOTDB_USER_DEFAULT_VALUE = "root";
+
+ public static final String CONNECTOR_IOTDB_PASSWORD_KEY = "connector.password";
+ public static final String SINK_IOTDB_PASSWORD_KEY = "sink.password";
+ public static final String CONNECTOR_IOTDB_PASSWORD_DEFAULT_VALUE = "root";
+
+ public static final String CONNECTOR_EXCEPTION_DATA_CONVERT_ON_TYPE_MISMATCH_KEY =
+ "connector.exception.data.convert-on-type-mismatch";
+ public static final String SINK_EXCEPTION_DATA_CONVERT_ON_TYPE_MISMATCH_KEY =
+ "sink.exception.data.convert-on-type-mismatch";
+ public static final boolean CONNECTOR_EXCEPTION_DATA_CONVERT_ON_TYPE_MISMATCH_DEFAULT_VALUE =
+ true;
+
+ public static final String CONNECTOR_EXCEPTION_CONFLICT_RESOLVE_STRATEGY_KEY =
+ "connector.exception.conflict.resolve-strategy";
+ public static final String SINK_EXCEPTION_CONFLICT_RESOLVE_STRATEGY_KEY =
+ "sink.exception.conflict.resolve-strategy";
+ public static final String CONNECTOR_EXCEPTION_CONFLICT_RESOLVE_STRATEGY_DEFAULT_VALUE = "retry";
+
+ public static final String CONNECTOR_EXCEPTION_CONFLICT_RETRY_MAX_TIME_SECONDS_KEY =
+ "connector.exception.conflict.retry-max-time-seconds";
+ public static final String SINK_EXCEPTION_CONFLICT_RETRY_MAX_TIME_SECONDS_KEY =
+ "sink.exception.conflict.retry-max-time-seconds";
+ public static final long CONNECTOR_EXCEPTION_CONFLICT_RETRY_MAX_TIME_SECONDS_DEFAULT_VALUE = 60;
+
+ public static final String CONNECTOR_EXCEPTION_CONFLICT_RECORD_IGNORED_DATA_KEY =
+ "connector.exception.conflict.record-ignored-data";
+ public static final String SINK_EXCEPTION_CONFLICT_RECORD_IGNORED_DATA_KEY =
+ "sink.exception.conflict.record-ignored-data";
+ public static final boolean CONNECTOR_EXCEPTION_CONFLICT_RECORD_IGNORED_DATA_DEFAULT_VALUE = true;
+
+ public static final String CONNECTOR_EXCEPTION_OTHERS_RETRY_MAX_TIME_SECONDS_KEY =
+ "connector.exception.others.retry-max-time-seconds";
+ public static final String SINK_EXCEPTION_OTHERS_RETRY_MAX_TIME_SECONDS_KEY =
+ "sink.exception.others.retry-max-time-seconds";
+ public static final long CONNECTOR_EXCEPTION_OTHERS_RETRY_MAX_TIME_SECONDS_DEFAULT_VALUE = -1;
+
+ public static final String CONNECTOR_EXCEPTION_OTHERS_RECORD_IGNORED_DATA_KEY =
+ "connector.exception.others.record-ignored-data";
+ public static final String SINK_EXCEPTION_OTHERS_RECORD_IGNORED_DATA_KEY =
+ "sink.exception.others.record-ignored-data";
+ public static final boolean CONNECTOR_EXCEPTION_OTHERS_RECORD_IGNORED_DATA_DEFAULT_VALUE = true;
+
+ public static final String CONNECTOR_AIR_GAP_E_LANGUAGE_ENABLE_KEY =
+ "connector.air-gap.e-language.enable";
+ public static final String SINK_AIR_GAP_E_LANGUAGE_ENABLE_KEY = "sink.air-gap.e-language.enable";
+ public static final boolean CONNECTOR_AIR_GAP_E_LANGUAGE_ENABLE_DEFAULT_VALUE = false;
+
+ public static final String CONNECTOR_AIR_GAP_HANDSHAKE_TIMEOUT_MS_KEY =
+ "connector.air-gap.handshake-timeout-ms";
+ public static final String SINK_AIR_GAP_HANDSHAKE_TIMEOUT_MS_KEY =
+ "sink.air-gap.handshake-timeout-ms";
+ public static final int CONNECTOR_AIR_GAP_HANDSHAKE_TIMEOUT_MS_DEFAULT_VALUE = 5000;
+
+ public static final String CONNECTOR_IOTDB_SYNC_CONNECTOR_VERSION_KEY = "connector.version";
+ public static final String SINK_IOTDB_SYNC_CONNECTOR_VERSION_KEY = "sink.version";
+ public static final String CONNECTOR_IOTDB_SYNC_CONNECTOR_VERSION_DEFAULT_VALUE = "1.1";
+
+ public static final String CONNECTOR_WEBSOCKET_PORT_KEY = "connector.websocket.port";
+ public static final String SINK_WEBSOCKET_PORT_KEY = "sink.websocket.port";
+ public static final int CONNECTOR_WEBSOCKET_PORT_DEFAULT_VALUE = 8080;
+
+ public static final String CONNECTOR_OPC_UA_MODEL_KEY = "connector.opcua.model";
+ public static final String SINK_OPC_UA_MODEL_KEY = "sink.opcua.model";
+ public static final String CONNECTOR_OPC_UA_MODEL_CLIENT_SERVER_VALUE = "client-server";
+ public static final String CONNECTOR_OPC_UA_MODEL_PUB_SUB_VALUE = "pub-sub";
+ public static final String CONNECTOR_OPC_UA_MODEL_DEFAULT_VALUE =
+ CONNECTOR_OPC_UA_MODEL_CLIENT_SERVER_VALUE;
+
+ public static final String CONNECTOR_OPC_UA_TCP_BIND_PORT_KEY = "connector.opcua.tcp.port";
+ public static final String SINK_OPC_UA_TCP_BIND_PORT_KEY = "sink.opcua.tcp.port";
+ public static final int CONNECTOR_OPC_UA_TCP_BIND_PORT_DEFAULT_VALUE = 12686;
+
+ public static final String CONNECTOR_OPC_UA_HTTPS_BIND_PORT_KEY = "connector.opcua.https.port";
+ public static final String SINK_OPC_UA_HTTPS_BIND_PORT_KEY = "sink.opcua.https.port";
+ public static final int CONNECTOR_OPC_UA_HTTPS_BIND_PORT_DEFAULT_VALUE = 8443;
+
+ public static final String CONNECTOR_OPC_UA_SECURITY_DIR_KEY = "connector.opcua.security.dir";
+ public static final String SINK_OPC_UA_SECURITY_DIR_KEY = "sink.opcua.security.dir";
+ // public static final String CONNECTOR_OPC_UA_SECURITY_DIR_DEFAULT_VALUE =
+ // CommonDescriptor.getInstance().getConfDir() != null
+ // ? CommonDescriptor.getInstance().getConfDir() + File.separatorChar + "opc_security"
+ // : System.getProperty("user.home") + File.separatorChar + "iotdb_opc_security";
+
+ public static final String CONNECTOR_OPC_UA_ENABLE_ANONYMOUS_ACCESS_KEY =
+ "connector.opcua.enable-anonymous-access";
+ public static final String SINK_OPC_UA_ENABLE_ANONYMOUS_ACCESS_KEY =
+ "sink.opcua.enable-anonymous-access";
+ public static final boolean CONNECTOR_OPC_UA_ENABLE_ANONYMOUS_ACCESS_DEFAULT_VALUE = true;
+
+ public static final String CONNECTOR_OPC_UA_PLACEHOLDER_KEY = "connector.opcua.placeholder";
+ public static final String SINK_OPC_UA_PLACEHOLDER_KEY = "sink.opcua.placeholder";
+ public static final String CONNECTOR_OPC_UA_PLACEHOLDER_DEFAULT_VALUE = "null";
+
+ public static final String CONNECTOR_LEADER_CACHE_ENABLE_KEY = "connector.leader-cache.enable";
+ public static final String SINK_LEADER_CACHE_ENABLE_KEY = "sink.leader-cache.enable";
+ public static final boolean CONNECTOR_LEADER_CACHE_ENABLE_DEFAULT_VALUE = true;
+
+ public static final String CONNECTOR_LOAD_BALANCE_STRATEGY_KEY =
+ "connector.load-balance-strategy";
+ public static final String SINK_LOAD_BALANCE_STRATEGY_KEY = "sink.load-balance-strategy";
+ public static final String CONNECTOR_LOAD_BALANCE_ROUND_ROBIN_STRATEGY = "round-robin";
+ public static final String CONNECTOR_LOAD_BALANCE_RANDOM_STRATEGY = "random";
+ public static final String CONNECTOR_LOAD_BALANCE_PRIORITY_STRATEGY = "priority";
+ public static final Set CONNECTOR_LOAD_BALANCE_STRATEGY_SET =
+ Collections.unmodifiableSet(
+ new HashSet<>(
+ Arrays.asList(
+ CONNECTOR_LOAD_BALANCE_ROUND_ROBIN_STRATEGY,
+ CONNECTOR_LOAD_BALANCE_RANDOM_STRATEGY,
+ CONNECTOR_LOAD_BALANCE_PRIORITY_STRATEGY)));
+
+ public static final String CONNECTOR_COMPRESSOR_KEY = "connector.compressor";
+ public static final String SINK_COMPRESSOR_KEY = "sink.compressor";
+ public static final String CONNECTOR_COMPRESSOR_DEFAULT_VALUE = "";
+ public static final String CONNECTOR_COMPRESSOR_SNAPPY = "snappy";
+ public static final String CONNECTOR_COMPRESSOR_GZIP = "gzip";
+ public static final String CONNECTOR_COMPRESSOR_LZ4 = "lz4";
+ public static final String CONNECTOR_COMPRESSOR_ZSTD = "zstd";
+ public static final String CONNECTOR_COMPRESSOR_LZMA2 = "lzma2";
+ public static final Set CONNECTOR_COMPRESSOR_SET =
+ Collections.unmodifiableSet(
+ new HashSet<>(
+ Arrays.asList(
+ CONNECTOR_COMPRESSOR_SNAPPY,
+ CONNECTOR_COMPRESSOR_GZIP,
+ CONNECTOR_COMPRESSOR_LZ4,
+ CONNECTOR_COMPRESSOR_ZSTD,
+ CONNECTOR_COMPRESSOR_LZMA2)));
+
+ public static final String CONNECTOR_COMPRESSOR_ZSTD_LEVEL_KEY =
+ "connector.compressor.zstd.level";
+ public static final String SINK_COMPRESSOR_ZSTD_LEVEL_KEY = "sink.compressor.zstd.level";
+ public static final int CONNECTOR_COMPRESSOR_ZSTD_LEVEL_DEFAULT_VALUE =
+ Zstd.defaultCompressionLevel();
+ public static final int CONNECTOR_COMPRESSOR_ZSTD_LEVEL_MIN_VALUE = Zstd.minCompressionLevel();
+ public static final int CONNECTOR_COMPRESSOR_ZSTD_LEVEL_MAX_VALUE = Zstd.maxCompressionLevel();
+
+ public static final String CONNECTOR_RATE_LIMIT_KEY = "connector.rate-limit-bytes-per-second";
+ public static final String SINK_RATE_LIMIT_KEY = "sink.rate-limit-bytes-per-second";
+ public static final double CONNECTOR_RATE_LIMIT_DEFAULT_VALUE = -1;
+
+ public static final String CONNECTOR_FORMAT_KEY = "connector.format";
+ public static final String SINK_FORMAT_KEY = "sink.format";
+ public static final String CONNECTOR_FORMAT_TABLET_VALUE = "tablet";
+ public static final String CONNECTOR_FORMAT_TS_FILE_VALUE = "tsfile";
+ public static final String CONNECTOR_FORMAT_HYBRID_VALUE = "hybrid";
+
+ public static final String SINK_TOPIC_KEY = "sink.topic";
+ public static final String SINK_CONSUMER_GROUP_KEY = "sink.consumer-group";
+
+ public static final String CONNECTOR_CONSENSUS_GROUP_ID_KEY = "connector.consensus.group-id";
+ public static final String CONNECTOR_CONSENSUS_PIPE_NAME = "connector.consensus.pipe-name";
+
+ public static final String CONNECTOR_LOAD_TSFILE_STRATEGY_KEY = "connector.load-tsfile-strategy";
+ public static final String SINK_LOAD_TSFILE_STRATEGY_KEY = "sink.load-tsfile-strategy";
+ public static final String CONNECTOR_LOAD_TSFILE_STRATEGY_ASYNC_VALUE = "async";
+ public static final String CONNECTOR_LOAD_TSFILE_STRATEGY_SYNC_VALUE = "sync";
+ public static final Set CONNECTOR_LOAD_TSFILE_STRATEGY_SET =
+ Collections.unmodifiableSet(
+ new HashSet<>(
+ Arrays.asList(
+ CONNECTOR_LOAD_TSFILE_STRATEGY_ASYNC_VALUE,
+ CONNECTOR_LOAD_TSFILE_STRATEGY_SYNC_VALUE)));
+
+ public static final String CONNECTOR_LOAD_TSFILE_VALIDATION_KEY =
+ "connector.load-tsfile-validation";
+ public static final String SINK_LOAD_TSFILE_VALIDATION_KEY = "sink.load-tsfile-validation";
+ public static final boolean CONNECTOR_LOAD_TSFILE_VALIDATION_DEFAULT_VALUE = true;
+
+ public static final String CONNECTOR_MARK_AS_PIPE_REQUEST_KEY = "connector.mark-as-pipe-request";
+ public static final String SINK_MARK_AS_PIPE_REQUEST_KEY = "sink.mark-as-pipe-request";
+ public static final boolean CONNECTOR_MARK_AS_PIPE_REQUEST_DEFAULT_VALUE = true;
+
+ public static final String CONNECTOR_SKIP_IF_KEY = "connector.skipif";
+ public static final String SINK_SKIP_IF_KEY = "sink.skipif";
+ public static final String CONNECTOR_IOTDB_SKIP_IF_NO_PRIVILEGES = "no-privileges";
+
+ public static final String CONNECTOR_OPC_DA_CLSID_KEY = "connector.opcda.clsid";
+ public static final String SINK_OPC_DA_CLSID_KEY = "sink.opcda.clsid";
+
+ public static final String CONNECTOR_OPC_DA_PROGID_KEY = "connector.opcda.progid";
+ public static final String SINK_OPC_DA_PROGID_KEY = "sink.opcda.progid";
+
+ private PipeConnectorConstant() {
+ throw new IllegalStateException("Utility class");
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/constant/PipeTransferHandshakeConstant.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/constant/PipeTransferHandshakeConstant.java
new file mode 100644
index 0000000..1adcfab
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/constant/PipeTransferHandshakeConstant.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.constant;
+
/** Keys exchanged during the pipe transfer handshake between a sender and a receiver. */
public class PipeTransferHandshakeConstant {

  public static final String HANDSHAKE_KEY_TIME_PRECISION = "timestampPrecision";
  public static final String HANDSHAKE_KEY_CLUSTER_ID = "clusterID";
  public static final String HANDSHAKE_KEY_CONVERT_ON_TYPE_MISMATCH = "convertOnTypeMismatch";
  public static final String HANDSHAKE_KEY_LOAD_TSFILE_STRATEGY = "loadTsFileStrategy";
  public static final String HANDSHAKE_KEY_USERNAME = "username";
  public static final String HANDSHAKE_KEY_PASSWORD = "password";
  public static final String HANDSHAKE_KEY_VALIDATE_TSFILE = "validateTsFile";
  public static final String HANDSHAKE_KEY_MARK_AS_PIPE_REQUEST = "markAsPipeRequest";

  private PipeTransferHandshakeConstant() {
    // forbidding instantiation
  }
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/event/PipeInsertionEvent.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/event/PipeInsertionEvent.java
new file mode 100644
index 0000000..1027ece
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/event/PipeInsertionEvent.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.event;
+
/**
 * Base class for insertion events flowing through the collector's sink pipeline.
 *
 * <p>Carries the model kind (tree vs. table) and the database name under each naming model. The
 * model kind is tri-state: {@code null} until it has been determined by the producer.
 */
public abstract class PipeInsertionEvent {

  // Tri-state: TRUE / FALSE once determined, null while still unknown.
  protected Boolean isTableModelEvent;

  protected String treeModelDatabaseName;
  protected String tableModelDatabaseName;

  /**
   * @return whether this event belongs to the table model
   * @throws IllegalStateException if the model kind has not been determined yet (the original code
   *     auto-unboxed the null field, surfacing an uninformative NullPointerException)
   */
  public boolean isTableModelEvent() {
    if (isTableModelEvent == null) {
      throw new IllegalStateException("The event's model kind has not been determined yet");
    }
    return isTableModelEvent;
  }

  /** @return the database name under the tree model, or {@code null} if not set */
  public String getTreeModelDatabaseName() {
    return treeModelDatabaseName;
  }

  /** @return the database name under the table model, or {@code null} if not set */
  public String getTableModelDatabaseName() {
    return tableModelDatabaseName;
  }
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/event/PipeRawTabletInsertionEvent.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/event/PipeRawTabletInsertionEvent.java
new file mode 100644
index 0000000..bee813a
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/event/PipeRawTabletInsertionEvent.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.event;
+
+import org.apache.iotdb.pipe.api.access.Row;
+import org.apache.iotdb.pipe.api.collector.RowCollector;
+import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
+
+import org.apache.tsfile.write.record.Tablet;
+
+import java.util.function.BiConsumer;
+
+public class PipeRawTabletInsertionEvent extends PipeInsertionEvent
+ implements TabletInsertionEvent, AutoCloseable {
+
+ public PipeRawTabletInsertionEvent(Tablet tablet, String deviceId) {
+ this.deviceId = deviceId;
+ this.tablet = tablet;
+ this.tablet.setDeviceId(deviceId);
+ }
+
+ public String deviceId;
+ protected Tablet tablet;
+ private boolean isAligned;
+
+ @Override
+ public Iterable processRowByRow(
+ final BiConsumer consumer) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Iterable processTablet(
+ final BiConsumer consumer) {
+ throw new UnsupportedOperationException();
+ }
+
+ public Tablet getTablet() {
+ return tablet;
+ }
+
+ public boolean isAligned() {
+ return isAligned;
+ }
+
+ public Tablet convertToTablet() {
+ return tablet;
+ }
+
+ public boolean isTableModelEvent() {
+ return false;
+ }
+
+ public String getDeviceId() {
+ return deviceId;
+ }
+
+ public String getTableModelDatabaseName() {
+ return null;
+ }
+
+ @Override
+ public void close() {}
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/event/PipeTsFileInsertionEvent.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/event/PipeTsFileInsertionEvent.java
new file mode 100644
index 0000000..2fc5e9a
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/event/PipeTsFileInsertionEvent.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.event;
+
+import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
+import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent;
+import org.apache.iotdb.pipe.api.exception.PipeException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+
+public class PipeTsFileInsertionEvent extends PipeInsertionEvent implements TsFileInsertionEvent {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(PipeTsFileInsertionEvent.class);
+
+ private File tsFile;
+
+ @Override
+ public boolean isTableModelEvent() {
+ throw new PipeException("");
+ }
+
+ public File getTsFile() {
+ return tsFile;
+ }
+
+ @Override
+ public Iterable toTabletInsertionEvents() throws PipeException {
+ return toTabletInsertionEvents(Long.MAX_VALUE);
+ }
+
+ public Iterable toTabletInsertionEvents(final long timeoutMs)
+ throws PipeException {
+ return null;
+ }
+
+ @Override
+ public void close() throws Exception {}
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeConnectorCriticalException.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeConnectorCriticalException.java
new file mode 100644
index 0000000..041c7a3
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeConnectorCriticalException.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.exception;
+
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+
+public class PipeRuntimeConnectorCriticalException extends PipeRuntimeCriticalException {
+
+ public PipeRuntimeConnectorCriticalException(final String message) {
+ super(message);
+ }
+
+ public PipeRuntimeConnectorCriticalException(final String message, final long timeStamp) {
+ super(message, timeStamp);
+ }
+
+ @Override
+ public boolean equals(final Object obj) {
+ return obj instanceof PipeRuntimeConnectorCriticalException
+ && Objects.equals(getMessage(), ((PipeRuntimeConnectorCriticalException) obj).getMessage())
+ && Objects.equals(getTimeStamp(), ((PipeRuntimeException) obj).getTimeStamp());
+ }
+
+ @Override
+ public int hashCode() {
+ return super.hashCode();
+ }
+
+ @Override
+ public void serialize(final ByteBuffer byteBuffer) {
+ PipeRuntimeExceptionType.CONNECTOR_CRITICAL_EXCEPTION.serialize(byteBuffer);
+ ReadWriteIOUtils.write(getMessage(), byteBuffer);
+ ReadWriteIOUtils.write(getTimeStamp(), byteBuffer);
+ }
+
+ @Override
+ public void serialize(final OutputStream stream) throws IOException {
+ PipeRuntimeExceptionType.CONNECTOR_CRITICAL_EXCEPTION.serialize(stream);
+ ReadWriteIOUtils.write(getMessage(), stream);
+ ReadWriteIOUtils.write(getTimeStamp(), stream);
+ }
+
+ public static PipeRuntimeConnectorCriticalException deserializeFrom(
+ final PipeRuntimeMetaVersion version, final ByteBuffer byteBuffer) {
+ final String message = ReadWriteIOUtils.readString(byteBuffer);
+ switch (version) {
+ case VERSION_1:
+ return new PipeRuntimeConnectorCriticalException(message);
+ case VERSION_2:
+ return new PipeRuntimeConnectorCriticalException(
+ message, ReadWriteIOUtils.readLong(byteBuffer));
+ default:
+ throw new UnsupportedOperationException(String.format("Unsupported version %s", version));
+ }
+ }
+
+ public static PipeRuntimeConnectorCriticalException deserializeFrom(
+ final PipeRuntimeMetaVersion version, final InputStream stream) throws IOException {
+ final String message = ReadWriteIOUtils.readString(stream);
+ switch (version) {
+ case VERSION_1:
+ return new PipeRuntimeConnectorCriticalException(message);
+ case VERSION_2:
+ return new PipeRuntimeConnectorCriticalException(
+ message, ReadWriteIOUtils.readLong(stream));
+ default:
+ throw new UnsupportedOperationException(String.format("Unsupported version %s", version));
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "PipeRuntimeConnectorCriticalException{"
+ + "message='"
+ + getMessage()
+ + "', timeStamp="
+ + getTimeStamp()
+ + "}";
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeCriticalException.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeCriticalException.java
new file mode 100644
index 0000000..4ade0b2
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeCriticalException.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.exception;
+
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+
+public class PipeRuntimeCriticalException extends PipeRuntimeException {
+
+ public PipeRuntimeCriticalException(final String message) {
+ super(message);
+ }
+
+ public PipeRuntimeCriticalException(final String message, final long timeStamp) {
+ super(message, timeStamp);
+ }
+
+ @Override
+ public boolean equals(final Object obj) {
+ return obj instanceof PipeRuntimeCriticalException
+ && Objects.equals(getMessage(), ((PipeRuntimeCriticalException) obj).getMessage())
+ && Objects.equals(getTimeStamp(), ((PipeRuntimeException) obj).getTimeStamp());
+ }
+
+ @Override
+ public void serialize(final ByteBuffer byteBuffer) {
+ PipeRuntimeExceptionType.CRITICAL_EXCEPTION.serialize(byteBuffer);
+ ReadWriteIOUtils.write(getMessage(), byteBuffer);
+ ReadWriteIOUtils.write(getTimeStamp(), byteBuffer);
+ }
+
+ @Override
+ public void serialize(final OutputStream stream) throws IOException {
+ PipeRuntimeExceptionType.CRITICAL_EXCEPTION.serialize(stream);
+ ReadWriteIOUtils.write(getMessage(), stream);
+ ReadWriteIOUtils.write(getTimeStamp(), stream);
+ }
+
+ public static PipeRuntimeCriticalException deserializeFrom(
+ final PipeRuntimeMetaVersion version, final ByteBuffer byteBuffer) {
+ final String message = ReadWriteIOUtils.readString(byteBuffer);
+ switch (version) {
+ case VERSION_1:
+ return new PipeRuntimeCriticalException(message);
+ case VERSION_2:
+ return new PipeRuntimeCriticalException(message, ReadWriteIOUtils.readLong(byteBuffer));
+ default:
+ throw new UnsupportedOperationException(String.format("Unsupported version %s", version));
+ }
+ }
+
+ public static PipeRuntimeCriticalException deserializeFrom(
+ final PipeRuntimeMetaVersion version, final InputStream stream) throws IOException {
+ final String message = ReadWriteIOUtils.readString(stream);
+ switch (version) {
+ case VERSION_1:
+ return new PipeRuntimeCriticalException(message);
+ case VERSION_2:
+ return new PipeRuntimeCriticalException(message, ReadWriteIOUtils.readLong(stream));
+ default:
+ throw new UnsupportedOperationException(String.format("Unsupported version %s", version));
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "PipeRuntimeCriticalException{"
+ + "message='"
+ + getMessage()
+ + "', timeStamp="
+ + getTimeStamp()
+ + "}";
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeException.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeException.java
new file mode 100644
index 0000000..65652b4
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeException.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.exception;
+
+import org.apache.iotdb.pipe.api.exception.PipeException;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+
+public abstract class PipeRuntimeException extends PipeException {
+
+ protected PipeRuntimeException(final String message) {
+ super(message);
+ }
+
+ protected PipeRuntimeException(final String message, final long timeStamp) {
+ super(message, timeStamp);
+ }
+
+ @Override
+ public boolean equals(final Object obj) {
+ return obj instanceof PipeRuntimeException
+ && Objects.equals(getMessage(), ((PipeRuntimeException) obj).getMessage())
+ && Objects.equals(getTimeStamp(), ((PipeRuntimeException) obj).getTimeStamp());
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(getMessage(), getTimeStamp());
+ }
+
+ public abstract void serialize(final ByteBuffer byteBuffer);
+
+ public abstract void serialize(final OutputStream stream) throws IOException;
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeExceptionType.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeExceptionType.java
new file mode 100644
index 0000000..bd80d74
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeExceptionType.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.exception;
+
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+
+public enum PipeRuntimeExceptionType {
+ NON_CRITICAL_EXCEPTION((short) 1),
+ CRITICAL_EXCEPTION((short) 2),
+ CONNECTOR_CRITICAL_EXCEPTION((short) 3),
+ OUT_OF_MEMORY_CRITICAL_EXCEPTION((short) 4),
+ ;
+
+ private final short type;
+
+ PipeRuntimeExceptionType(short type) {
+ this.type = type;
+ }
+
+ public short getType() {
+ return type;
+ }
+
+ public void serialize(ByteBuffer byteBuffer) {
+ ReadWriteIOUtils.write(type, byteBuffer);
+ }
+
+ public void serialize(OutputStream stream) throws IOException {
+ ReadWriteIOUtils.write(type, stream);
+ }
+
+ public static PipeRuntimeException deserializeFrom(
+ PipeRuntimeMetaVersion version, ByteBuffer byteBuffer) {
+ final short type = ReadWriteIOUtils.readShort(byteBuffer);
+ switch (type) {
+ case 1:
+ return PipeRuntimeNonCriticalException.deserializeFrom(version, byteBuffer);
+ case 2:
+ return PipeRuntimeCriticalException.deserializeFrom(version, byteBuffer);
+ case 3:
+ return PipeRuntimeConnectorCriticalException.deserializeFrom(version, byteBuffer);
+ case 4:
+ return PipeRuntimeOutOfMemoryCriticalException.deserializeFrom(version, byteBuffer);
+ default:
+ throw new UnsupportedOperationException(
+ String.format("Unsupported PipeRuntimeException type %s.", type));
+ }
+ }
+
+ public static PipeRuntimeException deserializeFrom(
+ PipeRuntimeMetaVersion version, InputStream stream) throws IOException {
+ final short type = ReadWriteIOUtils.readShort(stream);
+ switch (type) {
+ case 1:
+ return PipeRuntimeNonCriticalException.deserializeFrom(version, stream);
+ case 2:
+ return PipeRuntimeCriticalException.deserializeFrom(version, stream);
+ case 3:
+ return PipeRuntimeConnectorCriticalException.deserializeFrom(version, stream);
+ case 4:
+ return PipeRuntimeOutOfMemoryCriticalException.deserializeFrom(version, stream);
+ default:
+ throw new UnsupportedOperationException(
+ String.format("Unsupported PipeRuntimeException type %s.", type));
+ }
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeMetaVersion.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeMetaVersion.java
new file mode 100644
index 0000000..9986df9
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeMetaVersion.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.exception;
+
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
+
+public enum PipeRuntimeMetaVersion {
+
+ // for compatibility use
+ VERSION_1(PipeStatus.RUNNING.getType()),
+
+ VERSION_2(Byte.MAX_VALUE),
+ ;
+
+ private static final Map VERSION_MAP = new HashMap<>();
+
+ static {
+ // for compatibility use
+ for (final PipeStatus status : PipeStatus.values()) {
+ VERSION_MAP.put(status.getType(), VERSION_1);
+ }
+
+ for (final PipeRuntimeMetaVersion version : PipeRuntimeMetaVersion.values()) {
+ VERSION_MAP.put(version.getVersion(), version);
+ }
+ }
+
+ private final byte version;
+
+ PipeRuntimeMetaVersion(byte version) {
+ this.version = version;
+ }
+
+ public byte getVersion() {
+ return version;
+ }
+
+ public void serialize(OutputStream outputStream) throws IOException {
+ ReadWriteIOUtils.write(version, outputStream);
+ }
+
+ public static PipeRuntimeMetaVersion deserialize(InputStream inputStream) throws IOException {
+ return deserialize(ReadWriteIOUtils.readByte(inputStream));
+ }
+
+ public static PipeRuntimeMetaVersion deserialize(ByteBuffer byteBuffer) {
+ return deserialize(ReadWriteIOUtils.readByte(byteBuffer));
+ }
+
+ public static PipeRuntimeMetaVersion deserialize(byte version) {
+ return VERSION_MAP.get(version);
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeNonCriticalException.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeNonCriticalException.java
new file mode 100644
index 0000000..ef41889
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeNonCriticalException.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.exception;
+
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+
+public class PipeRuntimeNonCriticalException extends PipeRuntimeException {
+
+ public PipeRuntimeNonCriticalException(String message) {
+ super(message);
+ }
+
+ public PipeRuntimeNonCriticalException(String message, long timeStamp) {
+ super(message, timeStamp);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return obj instanceof PipeRuntimeNonCriticalException
+ && Objects.equals(getMessage(), ((PipeRuntimeNonCriticalException) obj).getMessage())
+ && Objects.equals(getTimeStamp(), ((PipeRuntimeException) obj).getTimeStamp());
+ }
+
+ @Override
+ public int hashCode() {
+ return super.hashCode();
+ }
+
+ @Override
+ public void serialize(ByteBuffer byteBuffer) {
+ PipeRuntimeExceptionType.NON_CRITICAL_EXCEPTION.serialize(byteBuffer);
+ ReadWriteIOUtils.write(getMessage(), byteBuffer);
+ ReadWriteIOUtils.write(getTimeStamp(), byteBuffer);
+ }
+
+ @Override
+ public void serialize(OutputStream stream) throws IOException {
+ PipeRuntimeExceptionType.NON_CRITICAL_EXCEPTION.serialize(stream);
+ ReadWriteIOUtils.write(getMessage(), stream);
+ ReadWriteIOUtils.write(getTimeStamp(), stream);
+ }
+
+ public static PipeRuntimeNonCriticalException deserializeFrom(
+ PipeRuntimeMetaVersion version, ByteBuffer byteBuffer) {
+ final String message = ReadWriteIOUtils.readString(byteBuffer);
+ switch (version) {
+ case VERSION_1:
+ return new PipeRuntimeNonCriticalException(message);
+ case VERSION_2:
+ return new PipeRuntimeNonCriticalException(message, ReadWriteIOUtils.readLong(byteBuffer));
+ default:
+ throw new UnsupportedOperationException(String.format("Unsupported version %s", version));
+ }
+ }
+
+ public static PipeRuntimeNonCriticalException deserializeFrom(
+ PipeRuntimeMetaVersion version, InputStream stream) throws IOException {
+ final String message = ReadWriteIOUtils.readString(stream);
+ switch (version) {
+ case VERSION_1:
+ return new PipeRuntimeNonCriticalException(message);
+ case VERSION_2:
+ return new PipeRuntimeNonCriticalException(message, ReadWriteIOUtils.readLong(stream));
+ default:
+ throw new UnsupportedOperationException(String.format("Unsupported version %s", version));
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "PipeRuntimeNonCriticalException{"
+ + "message='"
+ + getMessage()
+ + "', timeStamp="
+ + getTimeStamp()
+ + "}";
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeOutOfMemoryCriticalException.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeOutOfMemoryCriticalException.java
new file mode 100644
index 0000000..5106dcf
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeRuntimeOutOfMemoryCriticalException.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.exception;
+
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+
+public class PipeRuntimeOutOfMemoryCriticalException extends PipeRuntimeCriticalException {
+
+ public PipeRuntimeOutOfMemoryCriticalException(String message) {
+ super(message);
+ }
+
+ public PipeRuntimeOutOfMemoryCriticalException(String message, long timeStamp) {
+ super(message, timeStamp);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return obj instanceof PipeRuntimeOutOfMemoryCriticalException
+ && Objects.equals(
+ getMessage(), ((PipeRuntimeOutOfMemoryCriticalException) obj).getMessage())
+ && Objects.equals(getTimeStamp(), ((PipeRuntimeException) obj).getTimeStamp());
+ }
+
+ @Override
+ public int hashCode() {
+ return super.hashCode();
+ }
+
+ @Override
+ public void serialize(ByteBuffer byteBuffer) {
+ PipeRuntimeExceptionType.OUT_OF_MEMORY_CRITICAL_EXCEPTION.serialize(byteBuffer);
+ ReadWriteIOUtils.write(getMessage(), byteBuffer);
+ ReadWriteIOUtils.write(getTimeStamp(), byteBuffer);
+ }
+
+ @Override
+ public void serialize(OutputStream stream) throws IOException {
+ PipeRuntimeExceptionType.OUT_OF_MEMORY_CRITICAL_EXCEPTION.serialize(stream);
+ ReadWriteIOUtils.write(getMessage(), stream);
+ ReadWriteIOUtils.write(getTimeStamp(), stream);
+ }
+
+ public static PipeRuntimeOutOfMemoryCriticalException deserializeFrom(
+ PipeRuntimeMetaVersion version, ByteBuffer byteBuffer) {
+ final String message = ReadWriteIOUtils.readString(byteBuffer);
+ switch (version) {
+ case VERSION_1:
+ return new PipeRuntimeOutOfMemoryCriticalException(message);
+ case VERSION_2:
+ return new PipeRuntimeOutOfMemoryCriticalException(
+ message, ReadWriteIOUtils.readLong(byteBuffer));
+ default:
+ throw new UnsupportedOperationException(String.format("Unsupported version %s", version));
+ }
+ }
+
+ public static PipeRuntimeOutOfMemoryCriticalException deserializeFrom(
+ PipeRuntimeMetaVersion version, InputStream stream) throws IOException {
+ final String message = ReadWriteIOUtils.readString(stream);
+ switch (version) {
+ case VERSION_1:
+ return new PipeRuntimeOutOfMemoryCriticalException(message);
+ case VERSION_2:
+ return new PipeRuntimeOutOfMemoryCriticalException(
+ message, ReadWriteIOUtils.readLong(stream));
+ default:
+ throw new UnsupportedOperationException(String.format("Unsupported version %s", version));
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "PipeRuntimeOutOfMemoryException{"
+ + "message='"
+ + getMessage()
+ + "', timeStamp="
+ + getTimeStamp()
+ + "}";
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeStatus.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeStatus.java
new file mode 100644
index 0000000..5ce28d1
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/exception/PipeStatus.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.exception;
+
/** Lifecycle state of a pipe, persisted as a single byte. */
public enum PipeStatus {
  RUNNING((byte) 0),
  STOPPED((byte) 1),
  DROPPED((byte) 2),
  ;

  private final byte type;

  PipeStatus(final byte type) {
    this.type = type;
  }

  /** @return the byte value used to persist this status */
  public byte getType() {
    return type;
  }

  /**
   * Maps a persisted byte back to its status.
   *
   * @throws IllegalArgumentException if the byte matches no status
   */
  public static PipeStatus getPipeStatus(final byte type) {
    for (final PipeStatus status : values()) {
      if (status.type == type) {
        return status;
      }
    }
    throw new IllegalArgumentException("Invalid input: " + type);
  }
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/evolvable/batch/PipeTabletEventBatch.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/evolvable/batch/PipeTabletEventBatch.java
new file mode 100644
index 0000000..b26a065
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/evolvable/batch/PipeTabletEventBatch.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.evolvable.batch;
+
+import org.apache.iotdb.collector.plugin.builtin.sink.event.PipeRawTabletInsertionEvent;
+import org.apache.iotdb.pipe.api.event.Event;
+import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+
+public abstract class PipeTabletEventBatch implements AutoCloseable {
+
+ protected final List events = new ArrayList<>();
+
+ private final int maxDelayInMs;
+ private long firstEventProcessingTime = Long.MIN_VALUE;
+
+ protected long totalBufferSize = 0;
+
+ protected volatile boolean isClosed = false;
+
+ protected PipeTabletEventBatch(final int maxDelayInMs, final long requestMaxBatchSizeInBytes) {
+ this.maxDelayInMs = maxDelayInMs;
+ }
+
+ /**
+ * Try offer {@link Event} into batch if the given {@link Event} is not duplicated.
+ *
+ * @param event the given {@link Event}
+ * @return {@code true} if the batch can be transferred
+ */
+ public synchronized boolean onEvent(final TabletInsertionEvent event) throws IOException {
+ if (isClosed || !(event instanceof PipeRawTabletInsertionEvent)) {
+ return false;
+ }
+
+ // The deduplication logic here is to avoid the accumulation of
+ // the same event in a batch when retrying.
+ if (events.isEmpty() || !Objects.equals(events.get(events.size() - 1), event)) {
+ if (constructBatch(event)) {
+ events.add((PipeRawTabletInsertionEvent) event);
+ }
+
+ if (firstEventProcessingTime == Long.MIN_VALUE) {
+ firstEventProcessingTime = System.currentTimeMillis();
+ }
+ }
+
+ return shouldEmit();
+ }
+
+ /**
+ * Added an {@link TabletInsertionEvent} into batch.
+ *
+ * @param event the {@link TabletInsertionEvent} in batch
+ * @return {@code true} if the event is calculated into batch, {@code false} if the event is
+ * cached and not emitted in this batch. If there are failure encountered, just throw
+ * exceptions and do not return {@code false} here.
+ */
+ protected abstract boolean constructBatch(final TabletInsertionEvent event) throws IOException;
+
+ public boolean shouldEmit() {
+ return totalBufferSize >= getMaxBatchSizeInBytes()
+ || System.currentTimeMillis() - firstEventProcessingTime >= maxDelayInMs;
+ }
+
+ private long getMaxBatchSizeInBytes() {
+ // return allocatedMemoryBlock.getMemoryUsageInBytes();
+ return 16777216;
+ }
+
+ public synchronized void onSuccess() {
+ events.clear();
+
+ totalBufferSize = 0;
+
+ firstEventProcessingTime = Long.MIN_VALUE;
+ }
+
+ @Override
+ public synchronized void close() {
+ isClosed = true;
+
+ events.clear();
+ }
+
+ public List deepCopyEvents() {
+ return new ArrayList<>(events);
+ }
+
+ public boolean isEmpty() {
+ return events.isEmpty();
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/evolvable/batch/PipeTabletEventPlainBatch.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/evolvable/batch/PipeTabletEventPlainBatch.java
new file mode 100644
index 0000000..e147788
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/evolvable/batch/PipeTabletEventPlainBatch.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.evolvable.batch;
+
+import org.apache.iotdb.collector.plugin.builtin.sink.event.PipeRawTabletInsertionEvent;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request.PipeTransferTabletBatchReqV2;
+import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
+
+import org.apache.tsfile.utils.Pair;
+import org.apache.tsfile.utils.PublicBAOS;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class PipeTabletEventPlainBatch extends PipeTabletEventBatch {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(PipeTabletEventPlainBatch.class);
+
+ private final List binaryBuffers = new ArrayList<>();
+ private final List insertNodeBuffers = new ArrayList<>();
+ private final List tabletBuffers = new ArrayList<>();
+
+ private static final String TREE_MODEL_DATABASE_PLACEHOLDER = null;
+ private final List binaryDataBases = new ArrayList<>();
+ private final List insertNodeDataBases = new ArrayList<>();
+ private final List tabletDataBases = new ArrayList<>();
+
+ // Used to rate limit when transferring data
+ private final Map, Long> pipe2BytesAccumulated = new HashMap<>();
+
+ PipeTabletEventPlainBatch(final int maxDelayInMs, final long requestMaxBatchSizeInBytes) {
+ super(maxDelayInMs, requestMaxBatchSizeInBytes);
+ }
+
+ @Override
+ protected boolean constructBatch(final TabletInsertionEvent event) throws IOException {
+ final int bufferSize = buildTabletInsertionBuffer(event);
+ totalBufferSize += bufferSize;
+ pipe2BytesAccumulated.compute(
+ new Pair<>("", 0L),
+ (pipeName, bytesAccumulated) ->
+ bytesAccumulated == null ? bufferSize : bytesAccumulated + bufferSize);
+ return true;
+ }
+
+ @Override
+ public synchronized void onSuccess() {
+ super.onSuccess();
+
+ binaryBuffers.clear();
+ insertNodeBuffers.clear();
+ tabletBuffers.clear();
+
+ binaryDataBases.clear();
+ insertNodeDataBases.clear();
+ tabletDataBases.clear();
+
+ pipe2BytesAccumulated.clear();
+ }
+
+ public PipeTransferTabletBatchReqV2 toTPipeTransferReq() throws IOException {
+ return PipeTransferTabletBatchReqV2.toTPipeTransferReq(
+ binaryBuffers,
+ insertNodeBuffers,
+ tabletBuffers,
+ binaryDataBases,
+ insertNodeDataBases,
+ tabletDataBases);
+ }
+
+ private int buildTabletInsertionBuffer(final TabletInsertionEvent event) throws IOException {
+ int databaseEstimateSize;
+ final ByteBuffer buffer;
+
+ final PipeRawTabletInsertionEvent pipeRawTabletInsertionEvent =
+ (PipeRawTabletInsertionEvent) event;
+ try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+ final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+ pipeRawTabletInsertionEvent.convertToTablet().serialize(outputStream);
+ ReadWriteIOUtils.write(pipeRawTabletInsertionEvent.isAligned(), outputStream);
+ buffer = ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size());
+ }
+ tabletBuffers.add(buffer);
+ if (pipeRawTabletInsertionEvent.isTableModelEvent()) {
+ databaseEstimateSize = pipeRawTabletInsertionEvent.getTableModelDatabaseName().length();
+ tabletDataBases.add(pipeRawTabletInsertionEvent.getTableModelDatabaseName());
+ } else {
+ databaseEstimateSize = 4;
+ tabletDataBases.add(TREE_MODEL_DATABASE_PLACEHOLDER);
+ }
+
+ return buffer.limit() + databaseEstimateSize;
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/evolvable/batch/PipeTabletEventTsFileBatch.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/evolvable/batch/PipeTabletEventTsFileBatch.java
new file mode 100644
index 0000000..93e9d37
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/evolvable/batch/PipeTabletEventTsFileBatch.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.evolvable.batch;
+
+import org.apache.iotdb.collector.plugin.builtin.sink.event.PipeRawTabletInsertionEvent;
+import org.apache.iotdb.collector.utils.builder.PipeTableModeTsFileBuilder;
+import org.apache.iotdb.collector.utils.builder.PipeTreeModelTsFileBuilder;
+import org.apache.iotdb.collector.utils.builder.PipeTsFileBuilder;
+import org.apache.iotdb.collector.utils.sorter.PipeTableModelTabletEventSorter;
+import org.apache.iotdb.collector.utils.sorter.PipeTreeModelTabletEventSorter;
+import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
+
+import org.apache.tsfile.exception.write.WriteProcessException;
+import org.apache.tsfile.utils.Pair;
+import org.apache.tsfile.write.record.Tablet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class PipeTabletEventTsFileBatch extends PipeTabletEventBatch {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(PipeTabletEventTsFileBatch.class);
+
+ private static final AtomicLong BATCH_ID_GENERATOR = new AtomicLong(0);
+ private final AtomicLong currentBatchId = new AtomicLong(BATCH_ID_GENERATOR.incrementAndGet());
+
+ private final PipeTsFileBuilder treeModeTsFileBuilder;
+ private final PipeTsFileBuilder tableModeTsFileBuilder;
+
+ private final Map, Double> pipeName2WeightMap = new HashMap<>();
+
+ public PipeTabletEventTsFileBatch(final int maxDelayInMs, final long requestMaxBatchSizeInBytes) {
+ super(maxDelayInMs, requestMaxBatchSizeInBytes);
+
+ final AtomicLong tsFileIdGenerator = new AtomicLong(0);
+ treeModeTsFileBuilder = new PipeTreeModelTsFileBuilder(currentBatchId, tsFileIdGenerator);
+ tableModeTsFileBuilder = new PipeTableModeTsFileBuilder(currentBatchId, tsFileIdGenerator);
+ }
+
+ @Override
+ protected boolean constructBatch(final TabletInsertionEvent event) {
+ if (event instanceof PipeRawTabletInsertionEvent) {
+ final PipeRawTabletInsertionEvent rawTabletInsertionEvent =
+ (PipeRawTabletInsertionEvent) event;
+ final Tablet tablet = rawTabletInsertionEvent.convertToTablet();
+ if (tablet.getRowSize() == 0) {
+ return true;
+ }
+ if (rawTabletInsertionEvent.isTableModelEvent()) {
+ // table Model
+ bufferTableModelTablet("", 0L, tablet, rawTabletInsertionEvent.getTableModelDatabaseName());
+ } else {
+ // tree Model
+ bufferTreeModelTablet("", 0L, tablet, rawTabletInsertionEvent.isAligned());
+ }
+ } else {
+ LOGGER.warn(
+ "Batch id = {}: Unsupported event {} type {} when constructing tsfile batch",
+ currentBatchId.get(),
+ event,
+ event.getClass());
+ }
+ return true;
+ }
+
+ private void bufferTreeModelTablet(
+ final String pipeName,
+ final long creationTime,
+ final Tablet tablet,
+ final boolean isAligned) {
+ new PipeTreeModelTabletEventSorter(tablet).deduplicateAndSortTimestampsIfNecessary();
+
+ pipeName2WeightMap.compute(
+ new Pair<>(pipeName, creationTime),
+ (pipe, weight) -> Objects.nonNull(weight) ? ++weight : 1);
+
+ treeModeTsFileBuilder.bufferTreeModelTablet(tablet, isAligned);
+ }
+
+ private void bufferTableModelTablet(
+ final String pipeName, final long creationTime, final Tablet tablet, final String dataBase) {
+ new PipeTableModelTabletEventSorter(tablet).sortAndDeduplicateByDevIdTimestamp();
+
+ // totalBufferSize += PipeMemoryWeightUtil.calculateTabletSizeInBytes(tablet);
+
+ pipeName2WeightMap.compute(
+ new Pair<>(pipeName, creationTime),
+ (pipe, weight) -> Objects.nonNull(weight) ? ++weight : 1);
+
+ tableModeTsFileBuilder.bufferTableModelTablet(dataBase, tablet);
+ }
+
+ public Map, Double> deepCopyPipe2WeightMap() {
+ final double sum = pipeName2WeightMap.values().stream().reduce(Double::sum).orElse(0.0);
+ if (sum == 0.0) {
+ return Collections.emptyMap();
+ }
+ pipeName2WeightMap.entrySet().forEach(entry -> entry.setValue(entry.getValue() / sum));
+ return new HashMap<>(pipeName2WeightMap);
+ }
+
+ /**
+ * Converts a Tablet to a TSFile and returns the generated TSFile along with its corresponding
+ * database name.
+ *
+ * @return a list of pairs containing the database name and the generated TSFile
+ * @throws IOException if an I/O error occurs during the conversion process
+ * @throws WriteProcessException if an error occurs during the write process
+ */
+ public synchronized List> sealTsFiles()
+ throws IOException, WriteProcessException {
+ if (isClosed) {
+ return Collections.emptyList();
+ }
+
+ final List> list = new ArrayList<>();
+ if (!treeModeTsFileBuilder.isEmpty()) {
+ list.addAll(treeModeTsFileBuilder.convertTabletToTsFileWithDBInfo());
+ }
+ if (!tableModeTsFileBuilder.isEmpty()) {
+ list.addAll(tableModeTsFileBuilder.convertTabletToTsFileWithDBInfo());
+ }
+ return list;
+ }
+
+ @Override
+ public synchronized void onSuccess() {
+ super.onSuccess();
+
+ pipeName2WeightMap.clear();
+ tableModeTsFileBuilder.onSuccess();
+ treeModeTsFileBuilder.onSuccess();
+ }
+
+ @Override
+ public synchronized void close() {
+ super.close();
+
+ pipeName2WeightMap.clear();
+
+ tableModeTsFileBuilder.close();
+ treeModeTsFileBuilder.close();
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/evolvable/batch/PipeTransferBatchReqBuilder.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/evolvable/batch/PipeTransferBatchReqBuilder.java
new file mode 100644
index 0000000..5cd29e4
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/evolvable/batch/PipeTransferBatchReqBuilder.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.evolvable.batch;
+
+import org.apache.iotdb.collector.config.PipeRuntimeOptions;
+import org.apache.iotdb.collector.plugin.builtin.sink.event.PipeRawTabletInsertionEvent;
+import org.apache.iotdb.common.rpc.thrift.TEndPoint;
+import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
+import org.apache.iotdb.pipe.api.event.Event;
+import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
+
+import org.apache.tsfile.utils.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_FORMAT_HYBRID_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_FORMAT_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_FORMAT_TS_FILE_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_BATCH_DELAY_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_BATCH_SIZE_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_PLAIN_BATCH_DELAY_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_PLAIN_BATCH_SIZE_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_TS_FILE_BATCH_DELAY_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_TS_FILE_BATCH_SIZE_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_LEADER_CACHE_ENABLE_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_LEADER_CACHE_ENABLE_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_FORMAT_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_IOTDB_BATCH_DELAY_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_IOTDB_BATCH_SIZE_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_LEADER_CACHE_ENABLE_KEY;
+
+public class PipeTransferBatchReqBuilder implements AutoCloseable {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(PipeTransferBatchReqBuilder.class);
+
+ private final boolean useLeaderCache;
+
+ private final int requestMaxDelayInMs;
+ private final long requestMaxBatchSizeInBytes;
+
+ // If the leader cache is disabled (or unable to find the endpoint of event in the leader cache),
+ // the event will be stored in the default batch.
+ private final PipeTabletEventBatch defaultBatch;
+ // If the leader cache is enabled, the batch will be divided by the leader endpoint,
+ // each endpoint has a batch.
+ // This is only used in plain batch since tsfile does not return redirection info.
+ private final Map endPointToBatch = new HashMap<>();
+
+ public PipeTransferBatchReqBuilder(final PipeParameters parameters) {
+ final boolean usingTsFileBatch =
+ parameters
+ .getStringOrDefault(
+ Arrays.asList(CONNECTOR_FORMAT_KEY, SINK_FORMAT_KEY), CONNECTOR_FORMAT_HYBRID_VALUE)
+ .equals(CONNECTOR_FORMAT_TS_FILE_VALUE);
+
+ useLeaderCache =
+ !usingTsFileBatch
+ && parameters.getBooleanOrDefault(
+ Arrays.asList(SINK_LEADER_CACHE_ENABLE_KEY, CONNECTOR_LEADER_CACHE_ENABLE_KEY),
+ CONNECTOR_LEADER_CACHE_ENABLE_DEFAULT_VALUE);
+
+ final int requestMaxDelayInSeconds;
+ if (usingTsFileBatch) {
+ requestMaxDelayInSeconds =
+ parameters.getIntOrDefault(
+ Arrays.asList(CONNECTOR_IOTDB_BATCH_DELAY_KEY, SINK_IOTDB_BATCH_DELAY_KEY),
+ CONNECTOR_IOTDB_TS_FILE_BATCH_DELAY_DEFAULT_VALUE);
+ requestMaxDelayInMs =
+ requestMaxDelayInSeconds < 0 ? Integer.MAX_VALUE : requestMaxDelayInSeconds * 1000;
+ requestMaxBatchSizeInBytes =
+ parameters.getLongOrDefault(
+ Arrays.asList(CONNECTOR_IOTDB_BATCH_SIZE_KEY, SINK_IOTDB_BATCH_SIZE_KEY),
+ CONNECTOR_IOTDB_TS_FILE_BATCH_SIZE_DEFAULT_VALUE);
+ this.defaultBatch =
+ new PipeTabletEventTsFileBatch(requestMaxDelayInMs, requestMaxBatchSizeInBytes);
+ } else {
+ requestMaxDelayInSeconds =
+ parameters.getIntOrDefault(
+ Arrays.asList(CONNECTOR_IOTDB_BATCH_DELAY_KEY, SINK_IOTDB_BATCH_DELAY_KEY),
+ CONNECTOR_IOTDB_PLAIN_BATCH_DELAY_DEFAULT_VALUE);
+ requestMaxDelayInMs =
+ requestMaxDelayInSeconds < 0 ? Integer.MAX_VALUE : requestMaxDelayInSeconds * 1000;
+ requestMaxBatchSizeInBytes =
+ parameters.getLongOrDefault(
+ Arrays.asList(CONNECTOR_IOTDB_BATCH_SIZE_KEY, SINK_IOTDB_BATCH_SIZE_KEY),
+ CONNECTOR_IOTDB_PLAIN_BATCH_SIZE_DEFAULT_VALUE);
+ this.defaultBatch =
+ new PipeTabletEventPlainBatch(requestMaxDelayInMs, requestMaxBatchSizeInBytes);
+ }
+ }
+
+ /**
+ * Try offer {@link Event} into the corresponding batch if the given {@link Event} is not
+ * duplicated.
+ *
+ * @param event the given {@link Event}
+ * @return {@link Pair}<{@link TEndPoint}, {@link PipeTabletEventPlainBatch}> not null means this
+ * {@link PipeTabletEventPlainBatch} can be transferred. the first element is the leader
+ * endpoint to transfer to (might be null), the second element is the batch to be transferred.
+ */
+ public synchronized Pair onEvent(
+ final TabletInsertionEvent event) throws IOException {
+ if (!(event instanceof PipeRawTabletInsertionEvent)) {
+ LOGGER.warn(
+ "Unsupported event {} type {} when building transfer request", event, event.getClass());
+ return null;
+ }
+
+ if (!useLeaderCache) {
+ return defaultBatch.onEvent(event) ? new Pair<>(null, defaultBatch) : null;
+ }
+
+ String deviceId = ((PipeRawTabletInsertionEvent) event).getDeviceId();
+
+ if (Objects.isNull(deviceId)) {
+ return defaultBatch.onEvent(event) ? new Pair<>(null, defaultBatch) : null;
+ }
+
+ final TEndPoint endPoint =
+ new TEndPoint(PipeRuntimeOptions.RPC_ADDRESS.value(), PipeRuntimeOptions.RPC_PORT.value());
+
+ final PipeTabletEventPlainBatch batch =
+ endPointToBatch.computeIfAbsent(
+ endPoint,
+ k -> new PipeTabletEventPlainBatch(requestMaxDelayInMs, requestMaxBatchSizeInBytes));
+ return batch.onEvent(event) ? new Pair<>(endPoint, batch) : null;
+ }
+
+ /** Get all batches that have at least 1 event. */
+ public synchronized List> getAllNonEmptyBatches() {
+ final List> nonEmptyBatches = new ArrayList<>();
+ if (!defaultBatch.isEmpty()) {
+ nonEmptyBatches.add(new Pair<>(null, defaultBatch));
+ }
+ endPointToBatch.forEach(
+ (endPoint, batch) -> {
+ if (!batch.isEmpty()) {
+ nonEmptyBatches.add(new Pair<>(endPoint, batch));
+ }
+ });
+ return nonEmptyBatches;
+ }
+
+ public boolean isEmpty() {
+ return defaultBatch.isEmpty()
+ && endPointToBatch.values().stream().allMatch(PipeTabletEventPlainBatch::isEmpty);
+ }
+
+ @Override
+ public synchronized void close() {
+ defaultBatch.close();
+ endPointToBatch.values().forEach(PipeTabletEventPlainBatch::close);
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/IoTDBConnectorRequestVersion.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/IoTDBConnectorRequestVersion.java
new file mode 100644
index 0000000..54f10f2
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/IoTDBConnectorRequestVersion.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
/** Protocol version tag carried in the first byte of every pipe transfer request. */
public enum IoTDBConnectorRequestVersion {
  VERSION_1((byte) 1),
  VERSION_2((byte) 2);

  // Wire-level byte value of this version; part of the protocol, never renumber.
  private final byte version;

  IoTDBConnectorRequestVersion(final byte version) {
    this.version = version;
  }

  /** @return the byte value written to the wire for this version */
  public byte getVersion() {
    return version;
  }
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeRequestType.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeRequestType.java
new file mode 100644
index 0000000..aab7926
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeRequestType.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
/**
 * Identifies the kind of payload carried by a pipe transfer request. The numeric codes are part of
 * the wire protocol shared with the receiver and must never be renumbered.
 */
public enum PipeRequestType {

  // Handshake
  HANDSHAKE_CONFIGNODE_V1((short) 0),
  HANDSHAKE_DATANODE_V1((short) 1),
  HANDSHAKE_CONFIGNODE_V2((short) 50),
  HANDSHAKE_DATANODE_V2((short) 51),

  // Data region
  TRANSFER_TABLET_INSERT_NODE((short) 2),
  TRANSFER_TABLET_RAW((short) 3),
  TRANSFER_TS_FILE_PIECE((short) 4),
  TRANSFER_TS_FILE_SEAL((short) 5),
  TRANSFER_TABLET_BATCH((short) 6),
  TRANSFER_TABLET_BINARY((short) 7),
  TRANSFER_TS_FILE_PIECE_WITH_MOD((short) 8),
  TRANSFER_TS_FILE_SEAL_WITH_MOD((short) 9),

  TRANSFER_TABLET_INSERT_NODE_V2((short) 10),
  TRANSFER_TABLET_RAW_V2((short) 11),
  TRANSFER_TABLET_BINARY_V2((short) 12),
  TRANSFER_TABLET_BATCH_V2((short) 13),

  // Schema region / Delete Data
  TRANSFER_PLAN_NODE((short) 100),
  TRANSFER_SCHEMA_SNAPSHOT_PIECE((short) 101),
  TRANSFER_SCHEMA_SNAPSHOT_SEAL((short) 102),

  // Config region
  TRANSFER_CONFIG_PLAN((short) 200),
  TRANSFER_CONFIG_SNAPSHOT_PIECE((short) 201),
  TRANSFER_CONFIG_SNAPSHOT_SEAL((short) 202),

  // RPC Compression
  TRANSFER_COMPRESSED((short) 300),

  // Fallback Handling
  TRANSFER_SLICE((short) 400),
  ;

  private final short type;

  PipeRequestType(final short type) {
    this.type = type;
  }

  /** @return the wire-protocol code of this request type */
  public short getType() {
    return type;
  }

  // Reverse index from wire code to enum constant.
  // NOTE(review): the type parameters below were lost in extraction; restored as
  // Map<Short, PipeRequestType>.
  private static final Map<Short, PipeRequestType> TYPE_MAP = new HashMap<>();

  static {
    for (final PipeRequestType requestType : values()) {
      TYPE_MAP.put(requestType.getType(), requestType);
    }
  }

  /** @return {@code true} iff the given wire code maps to a known request type */
  public static boolean isValidatedRequestType(final short type) {
    return TYPE_MAP.containsKey(type);
  }

  /**
   * @return the request type for the given wire code, or {@code null} when unknown — callers
   *     should guard with {@link #isValidatedRequestType(short)}
   */
  public static PipeRequestType valueOf(final short type) {
    return TYPE_MAP.get(type);
  }
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferCompressedReq.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferCompressedReq.java
new file mode 100644
index 0000000..7f53932
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferCompressedReq.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.collector.plugin.builtin.sink.compressor.PipeCompressor;
+import org.apache.iotdb.collector.plugin.builtin.sink.compressor.PipeCompressorFactory;
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import org.apache.tsfile.utils.BytesUtils;
+import org.apache.tsfile.utils.PublicBAOS;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+public class PipeTransferCompressedReq extends TPipeTransferReq {
+
+ /** Generate a compressed req with provided compressors. */
+ public static TPipeTransferReq toTPipeTransferReq(
+ final TPipeTransferReq originalReq, final List compressors)
+ throws IOException {
+ // The generated PipeTransferCompressedReq consists of:
+ // version
+ // type: TRANSFER_COMPRESSED
+ // body:
+ // (byte) count of compressors (n)
+ // (n*3 bytes) for each compressor:
+ // (byte) compressor type
+ // (int) length of uncompressed bytes
+ // compressed req:
+ // (byte) version
+ // (2 bytes) type
+ // (bytes) body
+ final PipeTransferCompressedReq compressedReq = new PipeTransferCompressedReq();
+ compressedReq.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+ compressedReq.type = PipeRequestType.TRANSFER_COMPRESSED.getType();
+
+ try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+ final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+ byte[] body =
+ BytesUtils.concatByteArrayList(
+ Arrays.asList(
+ new byte[] {originalReq.version},
+ BytesUtils.shortToBytes(originalReq.type),
+ originalReq.getBody()));
+
+ ReadWriteIOUtils.write((byte) compressors.size(), outputStream);
+ for (final PipeCompressor compressor : compressors) {
+ ReadWriteIOUtils.write(compressor.serialize(), outputStream);
+ ReadWriteIOUtils.write(body.length, outputStream);
+ body = compressor.compress(body);
+ }
+ outputStream.write(body);
+
+ compressedReq.body =
+ ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size());
+ }
+ return compressedReq;
+ }
+
+ /** Get the original req from a compressed req. */
+ public static TPipeTransferReq fromTPipeTransferReq(final TPipeTransferReq transferReq)
+ throws IOException {
+ final ByteBuffer compressedBuffer = transferReq.body;
+
+ final List compressors = new ArrayList<>();
+ final List uncompressedLengths = new ArrayList<>();
+ final int compressorsSize = ReadWriteIOUtils.readByte(compressedBuffer);
+ for (int i = 0; i < compressorsSize; ++i) {
+ compressors.add(
+ PipeCompressorFactory.getCompressor(ReadWriteIOUtils.readByte(compressedBuffer)));
+ uncompressedLengths.add(ReadWriteIOUtils.readInt(compressedBuffer));
+ }
+
+ byte[] body = new byte[compressedBuffer.remaining()];
+ compressedBuffer.get(body);
+
+ for (int i = compressors.size() - 1; i >= 0; --i) {
+ body = compressors.get(i).decompress(body, uncompressedLengths.get(i));
+ }
+
+ final ByteBuffer decompressedBuffer = ByteBuffer.wrap(body);
+
+ final TPipeTransferReq decompressedReq = new TPipeTransferReq();
+ decompressedReq.version = ReadWriteIOUtils.readByte(decompressedBuffer);
+ decompressedReq.type = ReadWriteIOUtils.readShort(decompressedBuffer);
+ decompressedReq.body = decompressedBuffer.slice();
+
+ return decompressedReq;
+ }
+
+ /**
+ * For air-gap connectors. Generate the bytes of a compressed req from the bytes of original req.
+ */
+ public static byte[] toTPipeTransferReqBytes(
+ final byte[] rawReqInBytes, final List compressors) throws IOException {
+ // The generated bytes consists of:
+ // (byte) version
+ // (2 bytes) type: TRANSFER_COMPRESSED
+ // (byte) count of compressors (n)
+ // (n*3 bytes) for each compressor:
+ // (byte) compressor type
+ // (int) length of uncompressed bytes
+ // compressed req:
+ // (byte) version
+ // (2 bytes) type
+ // (bytes) body
+ try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+ final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+ byte[] body = rawReqInBytes;
+
+ ReadWriteIOUtils.write(IoTDBConnectorRequestVersion.VERSION_1.getVersion(), outputStream);
+ ReadWriteIOUtils.write(PipeRequestType.TRANSFER_COMPRESSED.getType(), outputStream);
+ ReadWriteIOUtils.write((byte) compressors.size(), outputStream);
+ for (final PipeCompressor compressor : compressors) {
+ ReadWriteIOUtils.write(compressor.serialize(), outputStream);
+ ReadWriteIOUtils.write(body.length, outputStream);
+ body = compressor.compress(body);
+ }
+ outputStream.write(body);
+
+ return byteArrayOutputStream.toByteArray();
+ }
+ }
+
+ private PipeTransferCompressedReq() {
+ // Empty constructor
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferDataNodeHandshakeV1Req.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferDataNodeHandshakeV1Req.java
new file mode 100644
index 0000000..f4cd65b
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferDataNodeHandshakeV1Req.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import java.io.IOException;
+
+public class PipeTransferDataNodeHandshakeV1Req extends PipeTransferHandshakeV1Req {
+
+ private PipeTransferDataNodeHandshakeV1Req() {
+ // Empty constructor
+ }
+
+ @Override
+ protected PipeRequestType getPlanType() {
+ return PipeRequestType.HANDSHAKE_DATANODE_V1;
+ }
+
+ /////////////////////////////// Thrift ///////////////////////////////
+
+ public static PipeTransferDataNodeHandshakeV1Req toTPipeTransferReq(
+ final String timestampPrecision) throws IOException {
+ return (PipeTransferDataNodeHandshakeV1Req)
+ new PipeTransferDataNodeHandshakeV1Req().convertToTPipeTransferReq(timestampPrecision);
+ }
+
+ public static PipeTransferDataNodeHandshakeV1Req fromTPipeTransferReq(
+ final TPipeTransferReq transferReq) {
+ return (PipeTransferDataNodeHandshakeV1Req)
+ new PipeTransferDataNodeHandshakeV1Req().translateFromTPipeTransferReq(transferReq);
+ }
+
+ /////////////////////////////// Air Gap ///////////////////////////////
+
+ public static byte[] toTPipeTransferBytes(final String timestampPrecision) throws IOException {
+ return new PipeTransferDataNodeHandshakeV1Req()
+ .convertToTransferHandshakeBytes(timestampPrecision);
+ }
+
+ /////////////////////////////// Object ///////////////////////////////
+
+ @Override
+ public boolean equals(final Object obj) {
+ return obj instanceof PipeTransferDataNodeHandshakeV1Req && super.equals(obj);
+ }
+
+ @Override
+ public int hashCode() {
+ return super.hashCode();
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferDataNodeHandshakeV2Req.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferDataNodeHandshakeV2Req.java
new file mode 100644
index 0000000..3fa37bf
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferDataNodeHandshakeV2Req.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import java.io.IOException;
+import java.util.Map;
+
+public class PipeTransferDataNodeHandshakeV2Req extends PipeTransferHandshakeV2Req {
+
+ private PipeTransferDataNodeHandshakeV2Req() {
+ // Empty constructor
+ }
+
+ @Override
+ protected PipeRequestType getPlanType() {
+ return PipeRequestType.HANDSHAKE_DATANODE_V2;
+ }
+
+ /////////////////////////////// Thrift ///////////////////////////////
+
+ public static PipeTransferDataNodeHandshakeV2Req toTPipeTransferReq(Map params)
+ throws IOException {
+ return (PipeTransferDataNodeHandshakeV2Req)
+ new PipeTransferDataNodeHandshakeV2Req().convertToTPipeTransferReq(params);
+ }
+
+ public static PipeTransferDataNodeHandshakeV2Req fromTPipeTransferReq(
+ TPipeTransferReq transferReq) {
+ return (PipeTransferDataNodeHandshakeV2Req)
+ new PipeTransferDataNodeHandshakeV2Req().translateFromTPipeTransferReq(transferReq);
+ }
+
+ /////////////////////////////// Air Gap ///////////////////////////////
+
+ public static byte[] toTPipeTransferBytes(Map params) throws IOException {
+ return new PipeTransferDataNodeHandshakeV2Req().convertToTransferHandshakeBytes(params);
+ }
+
+ /////////////////////////////// Object ///////////////////////////////
+
+ @Override
+ public boolean equals(Object obj) {
+ return obj instanceof PipeTransferDataNodeHandshakeV2Req && super.equals(obj);
+ }
+
+ @Override
+ public int hashCode() {
+ return super.hashCode();
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferFilePieceReq.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferFilePieceReq.java
new file mode 100644
index 0000000..05ed843
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferFilePieceReq.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import org.apache.tsfile.utils.Binary;
+import org.apache.tsfile.utils.PublicBAOS;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Objects;
+
+public abstract class PipeTransferFilePieceReq extends TPipeTransferReq {
+
+ private transient String fileName;
+ private transient long startWritingOffset;
+ private transient byte[] filePiece;
+
+ protected abstract PipeRequestType getPlanType();
+
+ /////////////////////////////// Thrift ///////////////////////////////
+
+ protected final PipeTransferFilePieceReq convertToTPipeTransferReq(
+ String snapshotName, long startWritingOffset, byte[] snapshotPiece) throws IOException {
+
+ this.fileName = snapshotName;
+ this.startWritingOffset = startWritingOffset;
+ this.filePiece = snapshotPiece;
+
+ this.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+ this.type = getPlanType().getType();
+ try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+ final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+ ReadWriteIOUtils.write(snapshotName, outputStream);
+ ReadWriteIOUtils.write(startWritingOffset, outputStream);
+ ReadWriteIOUtils.write(new Binary(snapshotPiece), outputStream);
+ body = ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size());
+ }
+
+ return this;
+ }
+
+ protected final PipeTransferFilePieceReq translateFromTPipeTransferReq(
+ TPipeTransferReq transferReq) {
+
+ fileName = ReadWriteIOUtils.readString(transferReq.body);
+ startWritingOffset = ReadWriteIOUtils.readLong(transferReq.body);
+ filePiece = ReadWriteIOUtils.readBinary(transferReq.body).getValues();
+
+ version = transferReq.version;
+ type = transferReq.type;
+ body = transferReq.body;
+
+ return this;
+ }
+
+ /////////////////////////////// Air Gap ///////////////////////////////
+
+ protected final byte[] convertToTPipeTransferBytes(
+ String snapshotName, long startWritingOffset, byte[] snapshotPiece) throws IOException {
+ try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+ final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+ ReadWriteIOUtils.write(IoTDBConnectorRequestVersion.VERSION_1.getVersion(), outputStream);
+ ReadWriteIOUtils.write(getPlanType().getType(), outputStream);
+ ReadWriteIOUtils.write(snapshotName, outputStream);
+ ReadWriteIOUtils.write(startWritingOffset, outputStream);
+ ReadWriteIOUtils.write(new Binary(snapshotPiece), outputStream);
+ return byteArrayOutputStream.toByteArray();
+ }
+ }
+
+ /////////////////////////////// Object ///////////////////////////////
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ PipeTransferFilePieceReq that = (PipeTransferFilePieceReq) obj;
+ return fileName.equals(that.fileName)
+ && startWritingOffset == that.startWritingOffset
+ && Arrays.equals(filePiece, that.filePiece)
+ && version == that.version
+ && type == that.type
+ && body.equals(that.body);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(
+ fileName, startWritingOffset, Arrays.hashCode(filePiece), version, type, body);
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferFileSealReqV1.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferFileSealReqV1.java
new file mode 100644
index 0000000..30f583d
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferFileSealReqV1.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import org.apache.tsfile.utils.PublicBAOS;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+
+public abstract class PipeTransferFileSealReqV1 extends TPipeTransferReq {
+
+ private transient String fileName;
+ private transient long fileLength;
+
+ protected abstract PipeRequestType getPlanType();
+
+ /////////////////////////////// Thrift ///////////////////////////////
+
+ protected PipeTransferFileSealReqV1 convertToTPipeTransferReq(String fileName, long fileLength)
+ throws IOException {
+
+ this.fileName = fileName;
+ this.fileLength = fileLength;
+
+ this.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+ this.type = getPlanType().getType();
+ try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+ final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+ ReadWriteIOUtils.write(fileName, outputStream);
+ ReadWriteIOUtils.write(fileLength, outputStream);
+ this.body = ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size());
+ }
+
+ return this;
+ }
+
+ public PipeTransferFileSealReqV1 translateFromTPipeTransferReq(TPipeTransferReq req) {
+
+ fileName = ReadWriteIOUtils.readString(req.body);
+ fileLength = ReadWriteIOUtils.readLong(req.body);
+
+ version = req.version;
+ type = req.type;
+ body = req.body;
+
+ return this;
+ }
+
+ /////////////////////////////// Air Gap ///////////////////////////////
+
+ public byte[] convertToTPipeTransferSnapshotSealBytes(String fileName, long fileLength)
+ throws IOException {
+ try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+ final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+ ReadWriteIOUtils.write(IoTDBConnectorRequestVersion.VERSION_1.getVersion(), outputStream);
+ ReadWriteIOUtils.write(getPlanType().getType(), outputStream);
+ ReadWriteIOUtils.write(fileName, outputStream);
+ ReadWriteIOUtils.write(fileLength, outputStream);
+ return byteArrayOutputStream.toByteArray();
+ }
+ }
+
+ /////////////////////////////// Object ///////////////////////////////
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ PipeTransferFileSealReqV1 that = (PipeTransferFileSealReqV1) obj;
+ return fileName.equals(that.fileName)
+ && fileLength == that.fileLength
+ && version == that.version
+ && type == that.type
+ && body.equals(that.body);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(fileName, fileLength, version, type, body);
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferFileSealReqV2.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferFileSealReqV2.java
new file mode 100644
index 0000000..a380300
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferFileSealReqV2.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import org.apache.tsfile.utils.PublicBAOS;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Seal request (version 2) that marks a batch of transferred files as complete. In addition to
+ * per-file names and lengths it carries a string parameter map (pattern / database / table
+ * metadata) that is serialized into the thrift body.
+ *
+ * <p>Body layout: fileNames (size + entries), fileLengths (size + entries), parameters (size +
+ * key/value pairs).
+ */
+public abstract class PipeTransferFileSealReqV2 extends TPipeTransferReq {
+
+  public static final String DATABASE_PATTERN = "database_pattern";
+  public static final String TREE = "tree";
+  public static final String TABLE = "table";
+
+  // Typed generics instead of raw List/Map so subclasses and callers get compile-time checks.
+  protected transient List<String> fileNames;
+  protected transient List<Long> fileLengths;
+  protected transient Map<String, String> parameters;
+
+  public final Map<String, String> getParameters() {
+    return parameters;
+  }
+
+  /** @return the concrete request type used to tag the serialized payload */
+  protected abstract PipeRequestType getPlanType();
+
+  /////////////////////////////// Thrift ///////////////////////////////
+
+  /**
+   * Fills in the thrift fields (version, type, body) from the given file lists and parameters.
+   *
+   * @return this request, for chaining
+   * @throws IOException if serialization of the body fails
+   */
+  protected PipeTransferFileSealReqV2 convertToTPipeTransferReq(
+      final List<String> fileNames,
+      final List<Long> fileLengths,
+      final Map<String, String> parameters)
+      throws IOException {
+
+    this.fileNames = fileNames;
+    this.fileLengths = fileLengths;
+    this.parameters = parameters;
+
+    this.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+    this.type = getPlanType().getType();
+    try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+        final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+      ReadWriteIOUtils.write(fileNames.size(), outputStream);
+      for (final String fileName : fileNames) {
+        ReadWriteIOUtils.write(fileName, outputStream);
+      }
+      ReadWriteIOUtils.write(fileLengths.size(), outputStream);
+      for (final Long fileLength : fileLengths) {
+        ReadWriteIOUtils.write(fileLength, outputStream);
+      }
+      ReadWriteIOUtils.write(parameters.size(), outputStream);
+      for (final Map.Entry<String, String> entry : parameters.entrySet()) {
+        ReadWriteIOUtils.write(entry.getKey(), outputStream);
+        ReadWriteIOUtils.write(entry.getValue(), outputStream);
+      }
+      this.body = ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size());
+    }
+
+    return this;
+  }
+
+  /**
+   * Restores the transient fields from a received request. Note that reading advances the
+   * position of {@code req.body}, which is then stored as-is.
+   */
+  public PipeTransferFileSealReqV2 translateFromTPipeTransferReq(final TPipeTransferReq req) {
+    fileNames = new ArrayList<>();
+    int size = ReadWriteIOUtils.readInt(req.body);
+    for (int i = 0; i < size; ++i) {
+      fileNames.add(ReadWriteIOUtils.readString(req.body));
+    }
+
+    fileLengths = new ArrayList<>();
+    size = ReadWriteIOUtils.readInt(req.body);
+    for (int i = 0; i < size; ++i) {
+      fileLengths.add(ReadWriteIOUtils.readLong(req.body));
+    }
+
+    parameters = new HashMap<>();
+    size = ReadWriteIOUtils.readInt(req.body);
+    for (int i = 0; i < size; ++i) {
+      final String key = ReadWriteIOUtils.readString(req.body);
+      final String value = ReadWriteIOUtils.readString(req.body);
+      parameters.put(key, value);
+    }
+
+    version = req.version;
+    type = req.type;
+    body = req.body;
+
+    return this;
+  }
+
+  /////////////////////////////// Air Gap ///////////////////////////////
+
+  /** Serializes the seal request into the raw byte layout used by the air-gap channel. */
+  public byte[] convertToTPipeTransferSnapshotSealBytes(
+      List<String> fileNames, List<Long> fileLengths, Map<String, String> parameters)
+      throws IOException {
+    try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+        final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+      ReadWriteIOUtils.write(IoTDBConnectorRequestVersion.VERSION_1.getVersion(), outputStream);
+      ReadWriteIOUtils.write(getPlanType().getType(), outputStream);
+      ReadWriteIOUtils.write(fileNames.size(), outputStream);
+      for (final String fileName : fileNames) {
+        ReadWriteIOUtils.write(fileName, outputStream);
+      }
+      ReadWriteIOUtils.write(fileLengths.size(), outputStream);
+      for (final Long fileLength : fileLengths) {
+        ReadWriteIOUtils.write(fileLength, outputStream);
+      }
+      ReadWriteIOUtils.write(parameters.size(), outputStream);
+      for (final Map.Entry<String, String> entry : parameters.entrySet()) {
+        ReadWriteIOUtils.write(entry.getKey(), outputStream);
+        ReadWriteIOUtils.write(entry.getValue(), outputStream);
+      }
+      return byteArrayOutputStream.toByteArray();
+    }
+  }
+
+  /////////////////////////////// Object ///////////////////////////////
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
+    final PipeTransferFileSealReqV2 that = (PipeTransferFileSealReqV2) obj;
+    return Objects.equals(fileNames, that.fileNames)
+        && Objects.equals(fileLengths, that.fileLengths)
+        && Objects.equals(parameters, that.parameters)
+        && version == that.version
+        && type == that.type
+        && Objects.equals(body, that.body);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(fileNames, fileLengths, parameters, version, type, body);
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferHandshakeV1Req.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferHandshakeV1Req.java
new file mode 100644
index 0000000..a95bb55
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferHandshakeV1Req.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import org.apache.tsfile.utils.PublicBAOS;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+
+public abstract class PipeTransferHandshakeV1Req extends TPipeTransferReq {
+
+ private transient String timestampPrecision;
+
+ protected abstract PipeRequestType getPlanType();
+
+ /////////////////////////////// Thrift ///////////////////////////////
+
+ public final PipeTransferHandshakeV1Req convertToTPipeTransferReq(String timestampPrecision)
+ throws IOException {
+ this.timestampPrecision = timestampPrecision;
+
+ this.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+ this.type = getPlanType().getType();
+ try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+ final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+ ReadWriteIOUtils.write(timestampPrecision, outputStream);
+ this.body = ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size());
+ }
+
+ return this;
+ }
+
+ protected final PipeTransferHandshakeV1Req translateFromTPipeTransferReq(
+ TPipeTransferReq transferReq) {
+ timestampPrecision = ReadWriteIOUtils.readString(transferReq.body);
+
+ version = transferReq.version;
+ type = transferReq.type;
+ body = transferReq.body;
+
+ return this;
+ }
+
+ /////////////////////////////// Air Gap ///////////////////////////////
+
+ protected final byte[] convertToTransferHandshakeBytes(String timestampPrecision)
+ throws IOException {
+ try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+ final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+ ReadWriteIOUtils.write(IoTDBConnectorRequestVersion.VERSION_1.getVersion(), outputStream);
+ ReadWriteIOUtils.write(getPlanType().getType(), outputStream);
+ ReadWriteIOUtils.write(timestampPrecision, outputStream);
+ return byteArrayOutputStream.toByteArray();
+ }
+ }
+
+ /////////////////////////////// Object ///////////////////////////////
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ PipeTransferHandshakeV1Req that = (PipeTransferHandshakeV1Req) obj;
+ return timestampPrecision.equals(that.timestampPrecision)
+ && version == that.version
+ && type == that.type
+ && body.equals(that.body);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(timestampPrecision, version, type, body);
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferHandshakeV2Req.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferHandshakeV2Req.java
new file mode 100644
index 0000000..02c1310
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferHandshakeV2Req.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import org.apache.tsfile.utils.PublicBAOS;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Handshake request (version 2) carrying an arbitrary string parameter map, allowing the two
+ * sides to exchange multiple configuration values instead of only the timestamp precision.
+ */
+public abstract class PipeTransferHandshakeV2Req extends TPipeTransferReq {
+
+  // Typed generics instead of a raw Map so callers get compile-time checks.
+  private transient Map<String, String> params;
+
+  /** @return the concrete request type used to tag the serialized payload */
+  protected abstract PipeRequestType getPlanType();
+
+  /////////////////////////////// Thrift ///////////////////////////////
+
+  /**
+   * Fills in the thrift fields (version, type, body) from the given parameter map. Body layout:
+   * size followed by key/value string pairs.
+   *
+   * @return this request, for chaining
+   * @throws IOException if serialization of the body fails
+   */
+  protected final PipeTransferHandshakeV2Req convertToTPipeTransferReq(Map<String, String> params)
+      throws IOException {
+    this.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+    this.type = getPlanType().getType();
+    try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+        final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+      ReadWriteIOUtils.write(params.size(), outputStream);
+      for (final Map.Entry<String, String> entry : params.entrySet()) {
+        ReadWriteIOUtils.write(entry.getKey(), outputStream);
+        ReadWriteIOUtils.write(entry.getValue(), outputStream);
+      }
+      this.body = ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size());
+    }
+
+    this.params = params;
+    return this;
+  }
+
+  /**
+   * Restores the parameter map from a received request. Note that reading advances the position
+   * of {@code transferReq.body}, which is then stored as-is.
+   */
+  protected final PipeTransferHandshakeV2Req translateFromTPipeTransferReq(
+      TPipeTransferReq transferReq) {
+    final Map<String, String> params = new HashMap<>();
+    final int size = ReadWriteIOUtils.readInt(transferReq.body);
+    for (int i = 0; i < size; ++i) {
+      final String key = ReadWriteIOUtils.readString(transferReq.body);
+      final String value = ReadWriteIOUtils.readString(transferReq.body);
+      params.put(key, value);
+    }
+    this.params = params;
+
+    version = transferReq.version;
+    type = transferReq.type;
+    body = transferReq.body;
+
+    return this;
+  }
+
+  /////////////////////////////// Air Gap ///////////////////////////////
+
+  /** Serializes the handshake into the raw byte layout used by the air-gap channel. */
+  public final byte[] convertToTransferHandshakeBytes(Map<String, String> params)
+      throws IOException {
+    try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+        final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+      ReadWriteIOUtils.write(IoTDBConnectorRequestVersion.VERSION_1.getVersion(), outputStream);
+      ReadWriteIOUtils.write(getPlanType().getType(), outputStream);
+      ReadWriteIOUtils.write(params.size(), outputStream);
+      for (final Map.Entry<String, String> entry : params.entrySet()) {
+        ReadWriteIOUtils.write(entry.getKey(), outputStream);
+        ReadWriteIOUtils.write(entry.getValue(), outputStream);
+      }
+      return byteArrayOutputStream.toByteArray();
+    }
+  }
+
+  /////////////////////////////// Object ///////////////////////////////
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
+    final PipeTransferHandshakeV2Req that = (PipeTransferHandshakeV2Req) obj;
+    return Objects.equals(params, that.params)
+        && version == that.version
+        && type == that.type
+        && Objects.equals(body, that.body);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(params, version, type, body);
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferSchemaSnapshotPieceReq.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferSchemaSnapshotPieceReq.java
new file mode 100644
index 0000000..518fd73
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferSchemaSnapshotPieceReq.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import java.io.IOException;
+
+/**
+ * Piece-transfer request specialized for schema snapshot files. All construction goes through
+ * the static factories below; the serialization logic lives in the parent class.
+ */
+public class PipeTransferSchemaSnapshotPieceReq extends PipeTransferFilePieceReq {
+
+  private PipeTransferSchemaSnapshotPieceReq() {
+    // Instances are created only through the static factories.
+  }
+
+  @Override
+  protected PipeRequestType getPlanType() {
+    return PipeRequestType.TRANSFER_SCHEMA_SNAPSHOT_PIECE;
+  }
+
+  /////////////////////////////// Thrift ///////////////////////////////
+
+  /** Builds a thrift piece request for one chunk of a schema snapshot file. */
+  public static PipeTransferSchemaSnapshotPieceReq toTPipeTransferReq(
+      String fileName, long startWritingOffset, byte[] filePiece) throws IOException {
+    final PipeTransferSchemaSnapshotPieceReq req = new PipeTransferSchemaSnapshotPieceReq();
+    return (PipeTransferSchemaSnapshotPieceReq)
+        req.convertToTPipeTransferReq(fileName, startWritingOffset, filePiece);
+  }
+
+  /** Decodes a received thrift request back into a typed piece request. */
+  public static PipeTransferSchemaSnapshotPieceReq fromTPipeTransferReq(
+      TPipeTransferReq transferReq) {
+    final PipeTransferSchemaSnapshotPieceReq req = new PipeTransferSchemaSnapshotPieceReq();
+    return (PipeTransferSchemaSnapshotPieceReq) req.translateFromTPipeTransferReq(transferReq);
+  }
+
+  /////////////////////////////// Air Gap ///////////////////////////////
+
+  /** Serializes one snapshot piece into the raw byte layout used by the air-gap channel. */
+  public static byte[] toTPipeTransferBytes(
+      String fileName, long startWritingOffset, byte[] filePiece) throws IOException {
+    return new PipeTransferSchemaSnapshotPieceReq()
+        .convertToTPipeTransferBytes(fileName, startWritingOffset, filePiece);
+  }
+
+  /////////////////////////////// Object ///////////////////////////////
+
+  @Override
+  public boolean equals(Object obj) {
+    return obj instanceof PipeTransferSchemaSnapshotPieceReq && super.equals(obj);
+  }
+
+  @Override
+  public int hashCode() {
+    return super.hashCode();
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferSchemaSnapshotSealReq.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferSchemaSnapshotSealReq.java
new file mode 100644
index 0000000..32d8bef
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferSchemaSnapshotSealReq.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.collector.plugin.builtin.sink.constant.ColumnHeaderConstant;
+import org.apache.iotdb.collector.plugin.builtin.sink.protocol.session.IClientSession;
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Seal request for a schema snapshot transfer. Bundles the mTree snapshot file, the optional
+ * tLog and attribute snapshot files, plus the capture-scope parameters (path pattern,
+ * database/table pattern, captured dialects, database name and type string).
+ */
+public class PipeTransferSchemaSnapshotSealReq extends PipeTransferFileSealReqV2 {
+
+  private PipeTransferSchemaSnapshotSealReq() {
+    // Instances are created only through the static factories.
+  }
+
+  @Override
+  protected PipeRequestType getPlanType() {
+    return PipeRequestType.TRANSFER_SCHEMA_SNAPSHOT_SEAL;
+  }
+
+  /**
+   * Collects the snapshot file names. Tree-model sync (no attribute snapshot) ships the mTree
+   * snapshot plus an optional tLog; otherwise all three files are shipped.
+   */
+  private static List<String> collectFileNames(
+      final String mTreeSnapshotName, final String tLogName, final String attributeSnapshotName) {
+    if (Objects.isNull(attributeSnapshotName)) {
+      // Tree model sync
+      return Objects.nonNull(tLogName)
+          ? Arrays.asList(mTreeSnapshotName, tLogName)
+          : Collections.singletonList(mTreeSnapshotName);
+    }
+    return Arrays.asList(mTreeSnapshotName, tLogName, attributeSnapshotName);
+  }
+
+  /** Collects the file lengths, mirroring {@link #collectFileNames} element-for-element. */
+  private static List<Long> collectFileLengths(
+      final String tLogName,
+      final String attributeSnapshotName,
+      final long mTreeSnapshotLength,
+      final long tLogLength,
+      final long attributeSnapshotLength) {
+    if (Objects.isNull(attributeSnapshotName)) {
+      // Tree model sync
+      return Objects.nonNull(tLogName)
+          ? Arrays.asList(mTreeSnapshotLength, tLogLength)
+          : Collections.singletonList(mTreeSnapshotLength);
+    }
+    return Arrays.asList(mTreeSnapshotLength, tLogLength, attributeSnapshotLength);
+  }
+
+  /////////////////////////////// Thrift ///////////////////////////////
+
+  public static PipeTransferSchemaSnapshotSealReq toTPipeTransferReq(
+      final String treePattern,
+      final String tablePatternDatabase,
+      final String tablePatternTable,
+      final boolean isTreeCaptured,
+      final boolean isTableCaptured,
+      final String mTreeSnapshotName,
+      final long mTreeSnapshotLength,
+      final String tLogName,
+      final long tLogLength,
+      final String attributeSnapshotName,
+      final long attributeSnapshotLength,
+      final String databaseName,
+      final String typeString)
+      throws IOException {
+    final Map<String, String> parameters = new HashMap<>();
+    parameters.put(ColumnHeaderConstant.PATH_PATTERN, treePattern);
+    parameters.put(DATABASE_PATTERN, tablePatternDatabase);
+    parameters.put(ColumnHeaderConstant.TABLE_NAME, tablePatternTable);
+    if (isTreeCaptured) {
+      // NOTE(review): this path keys on SqlDialect.TREE.toString() while the air-gap path
+      // below uses the TREE ("tree") constant — confirm both receivers expect the same key.
+      parameters.put(IClientSession.SqlDialect.TREE.toString(), "");
+    }
+    if (isTableCaptured) {
+      parameters.put(IClientSession.SqlDialect.TABLE.toString(), "");
+    }
+    parameters.put(ColumnHeaderConstant.DATABASE, databaseName);
+    parameters.put(ColumnHeaderConstant.TYPE, typeString);
+
+    return (PipeTransferSchemaSnapshotSealReq)
+        new PipeTransferSchemaSnapshotSealReq()
+            .convertToTPipeTransferReq(
+                collectFileNames(mTreeSnapshotName, tLogName, attributeSnapshotName),
+                collectFileLengths(
+                    tLogName,
+                    attributeSnapshotName,
+                    mTreeSnapshotLength,
+                    tLogLength,
+                    attributeSnapshotLength),
+                parameters);
+  }
+
+  public static PipeTransferSchemaSnapshotSealReq fromTPipeTransferReq(final TPipeTransferReq req) {
+    return (PipeTransferSchemaSnapshotSealReq)
+        new PipeTransferSchemaSnapshotSealReq().translateFromTPipeTransferReq(req);
+  }
+
+  /////////////////////////////// Air Gap ///////////////////////////////
+
+  public static byte[] toTPipeTransferBytes(
+      final String treePattern,
+      final String tablePatternDatabase,
+      final String tablePatternTable,
+      final boolean isTreeCaptured,
+      final boolean isTableCaptured,
+      final String mTreeSnapshotName,
+      final long mTreeSnapshotLength,
+      final String tLogName,
+      final long tLogLength,
+      final String attributeSnapshotName,
+      final long attributeSnapshotLength,
+      final String databaseName,
+      final String typeString)
+      throws IOException {
+    final Map<String, String> parameters = new HashMap<>();
+    parameters.put(ColumnHeaderConstant.PATH_PATTERN, treePattern);
+    parameters.put(DATABASE_PATTERN, tablePatternDatabase);
+    parameters.put(ColumnHeaderConstant.TABLE_NAME, tablePatternTable);
+    if (isTreeCaptured) {
+      parameters.put(TREE, "");
+    }
+    if (isTableCaptured) {
+      parameters.put(TABLE, "");
+    }
+    parameters.put(ColumnHeaderConstant.DATABASE, databaseName);
+    parameters.put(ColumnHeaderConstant.TYPE, typeString);
+
+    return new PipeTransferSchemaSnapshotSealReq()
+        .convertToTPipeTransferSnapshotSealBytes(
+            collectFileNames(mTreeSnapshotName, tLogName, attributeSnapshotName),
+            collectFileLengths(
+                tLogName,
+                attributeSnapshotName,
+                mTreeSnapshotLength,
+                tLogLength,
+                attributeSnapshotLength),
+            parameters);
+  }
+
+  /////////////////////////////// Object ///////////////////////////////
+
+  @Override
+  public boolean equals(final Object obj) {
+    return obj instanceof PipeTransferSchemaSnapshotSealReq && super.equals(obj);
+  }
+
+  @Override
+  public int hashCode() {
+    return super.hashCode();
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferSliceReq.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferSliceReq.java
new file mode 100644
index 0000000..48cee90
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferSliceReq.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import org.apache.tsfile.utils.Binary;
+import org.apache.tsfile.utils.PublicBAOS;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Objects;
+
+/**
+ * A slice of an oversized request. A large origin request body is cut into multiple
+ * {@code PipeTransferSliceReq}s, each carrying its slice index/count and the origin metadata
+ * (type and total body size) needed to reassemble and dispatch the original request.
+ */
+public class PipeTransferSliceReq extends TPipeTransferReq {
+
+  // Groups slices belonging to the same origin request.
+  private transient int orderId;
+
+  // Type and total body size of the request being sliced, for reassembly on the receiver.
+  private transient short originReqType;
+  private transient int originBodySize;
+
+  private transient byte[] sliceBody;
+
+  private transient int sliceIndex;
+  private transient int sliceCount;
+
+  /////////////////////////////// Thrift ///////////////////////////////
+
+  /**
+   * Builds the slice covering {@code [startIndexInBody, endIndexInBody)} of the duplicated
+   * origin body. The caller must pass a duplicated buffer, since the position is mutated here.
+   *
+   * @throws IOException if serialization of the slice body fails
+   */
+  public static PipeTransferSliceReq toTPipeTransferReq(
+      final int orderId,
+      final short originReqType,
+      final int sliceIndex,
+      final int sliceCount,
+      final ByteBuffer duplicatedOriginBody,
+      final int startIndexInBody,
+      final int endIndexInBody)
+      throws IOException {
+    final PipeTransferSliceReq sliceReq = new PipeTransferSliceReq();
+
+    sliceReq.orderId = orderId;
+
+    sliceReq.originReqType = originReqType;
+    sliceReq.originBodySize = duplicatedOriginBody.limit();
+
+    sliceReq.sliceBody = new byte[endIndexInBody - startIndexInBody];
+    duplicatedOriginBody.position(startIndexInBody);
+    duplicatedOriginBody.get(sliceReq.sliceBody);
+
+    sliceReq.sliceIndex = sliceIndex;
+    sliceReq.sliceCount = sliceCount;
+
+    sliceReq.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+    sliceReq.type = PipeRequestType.TRANSFER_SLICE.getType();
+    try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+        final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+      ReadWriteIOUtils.write(sliceReq.orderId, outputStream);
+
+      ReadWriteIOUtils.write(sliceReq.originReqType, outputStream);
+      ReadWriteIOUtils.write(sliceReq.originBodySize, outputStream);
+
+      ReadWriteIOUtils.write(new Binary(sliceReq.sliceBody), outputStream);
+
+      ReadWriteIOUtils.write(sliceReq.sliceIndex, outputStream);
+      ReadWriteIOUtils.write(sliceReq.sliceCount, outputStream);
+
+      sliceReq.body =
+          ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size());
+    }
+
+    return sliceReq;
+  }
+
+  /**
+   * Decodes a received slice request. Note that reading advances the position of
+   * {@code transferReq.body}, which is then stored as-is.
+   */
+  public static PipeTransferSliceReq fromTPipeTransferReq(final TPipeTransferReq transferReq) {
+    final PipeTransferSliceReq sliceReq = new PipeTransferSliceReq();
+
+    sliceReq.orderId = ReadWriteIOUtils.readInt(transferReq.body);
+
+    sliceReq.originReqType = ReadWriteIOUtils.readShort(transferReq.body);
+    sliceReq.originBodySize = ReadWriteIOUtils.readInt(transferReq.body);
+
+    sliceReq.sliceBody = ReadWriteIOUtils.readBinary(transferReq.body).getValues();
+
+    sliceReq.sliceIndex = ReadWriteIOUtils.readInt(transferReq.body);
+    sliceReq.sliceCount = ReadWriteIOUtils.readInt(transferReq.body);
+
+    sliceReq.version = transferReq.version;
+    sliceReq.type = transferReq.type;
+    sliceReq.body = transferReq.body;
+
+    return sliceReq;
+  }
+
+  /////////////////////////////// Object ///////////////////////////////
+
+  @Override
+  public boolean equals(final Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
+    final PipeTransferSliceReq that = (PipeTransferSliceReq) obj;
+    // Primitive fields are compared with == directly; Objects.equals would autobox each one.
+    return orderId == that.orderId
+        && originReqType == that.originReqType
+        && originBodySize == that.originBodySize
+        && Arrays.equals(sliceBody, that.sliceBody)
+        && sliceIndex == that.sliceIndex
+        && sliceCount == that.sliceCount
+        && version == that.version
+        && type == that.type
+        && Objects.equals(body, that.body);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(
+        orderId,
+        originReqType,
+        originBodySize,
+        Arrays.hashCode(sliceBody),
+        sliceIndex,
+        sliceCount,
+        version,
+        type,
+        body);
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletBatchReqV2.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletBatchReqV2.java
new file mode 100644
index 0000000..f59678e
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletBatchReqV2.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import org.apache.tsfile.utils.PublicBAOS;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+
+public class PipeTransferTabletBatchReqV2 extends TPipeTransferReq {
+
+ private final transient List binaryReqs = new ArrayList<>();
+ private final transient List tabletReqs = new ArrayList<>();
+
+ private PipeTransferTabletBatchReqV2() {
+ // Empty constructor
+ }
+
+ /////////////////////////////// Thrift ///////////////////////////////
+
+ public static PipeTransferTabletBatchReqV2 toTPipeTransferReq(
+ final List binaryBuffers,
+ final List insertNodeBuffers,
+ final List tabletBuffers,
+ final List binaryDataBases,
+ final List insertNodeDataBases,
+ final List tabletDataBases)
+ throws IOException {
+ final PipeTransferTabletBatchReqV2 batchReq = new PipeTransferTabletBatchReqV2();
+
+ batchReq.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+ batchReq.type = PipeRequestType.TRANSFER_TABLET_BATCH_V2.getType();
+ try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+ final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+ ReadWriteIOUtils.write(binaryBuffers.size(), outputStream);
+ for (int i = 0; i < binaryBuffers.size(); i++) {
+ final ByteBuffer binaryBuffer = binaryBuffers.get(i);
+ ReadWriteIOUtils.write(binaryBuffer.limit(), outputStream);
+ outputStream.write(binaryBuffer.array(), 0, binaryBuffer.limit());
+ ReadWriteIOUtils.write(binaryDataBases.get(i), outputStream);
+ }
+
+ ReadWriteIOUtils.write(insertNodeBuffers.size(), outputStream);
+ for (int i = 0; i < insertNodeBuffers.size(); i++) {
+ final ByteBuffer insertNodeBuffer = insertNodeBuffers.get(i);
+ outputStream.write(insertNodeBuffer.array(), 0, insertNodeBuffer.limit());
+ ReadWriteIOUtils.write(insertNodeDataBases.get(i), outputStream);
+ }
+
+ ReadWriteIOUtils.write(tabletBuffers.size(), outputStream);
+ for (int i = 0; i < tabletBuffers.size(); i++) {
+ final ByteBuffer tabletBuffer = tabletBuffers.get(i);
+ outputStream.write(tabletBuffer.array(), 0, tabletBuffer.limit());
+ ReadWriteIOUtils.write(tabletDataBases.get(i), outputStream);
+ }
+
+ batchReq.body =
+ ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size());
+ }
+
+ return batchReq;
+ }
+
+ // public static PipeTransferTabletBatchReqV2 fromTPipeTransferReq(
+ // final TPipeTransferReq transferReq) {
+ // final PipeTransferTabletBatchReqV2 batchReq = new PipeTransferTabletBatchReqV2();
+ //
+ // int size = ReadWriteIOUtils.readInt(transferReq.body);
+ // for (int i = 0; i < size; ++i) {
+ // final int length = ReadWriteIOUtils.readInt(transferReq.body);
+ // final byte[] body = new byte[length];
+ // transferReq.body.get(body);
+ // batchReq.binaryReqs.add(
+ // PipeTransferTabletBinaryReqV2.toTPipeTransferBinaryReq(
+ // ByteBuffer.wrap(body), ReadWriteIOUtils.readString(transferReq.body)));
+ // }
+ //
+ // size = ReadWriteIOUtils.readInt(transferReq.body);
+ // for (int i = 0; i < size; ++i) {
+ // batchReq.insertNodeReqs.add(
+ // PipeTransferTabletInsertNodeReqV2.toTabletInsertNodeReq(
+ // (InsertNode) PlanFragment.deserializeHelper(transferReq.body, null),
+ // ReadWriteIOUtils.readString(transferReq.body)));
+ // }
+ //
+ // size = ReadWriteIOUtils.readInt(transferReq.body);
+ // for (int i = 0; i < size; ++i) {
+ // batchReq.tabletReqs.add(
+ // PipeTransferTabletRawReqV2.toTPipeTransferRawReq(
+ // Tablet.deserialize(transferReq.body),
+ // ReadWriteIOUtils.readBool(transferReq.body),
+ // ReadWriteIOUtils.readString(transferReq.body)));
+ // }
+ //
+ // batchReq.version = transferReq.version;
+ // batchReq.type = transferReq.type;
+ // batchReq.body = transferReq.body;
+ //
+ // return batchReq;
+ // }
+
+ /////////////////////////////// Object ///////////////////////////////
+
+ @Override
+ public boolean equals(final Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ final PipeTransferTabletBatchReqV2 that = (PipeTransferTabletBatchReqV2) obj;
+ return Objects.equals(binaryReqs, that.binaryReqs)
+ && Objects.equals(tabletReqs, that.tabletReqs)
+ && version == that.version
+ && type == that.type
+ && Objects.equals(body, that.body);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(binaryReqs, tabletReqs, version, type, body);
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletBinaryReq.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletBinaryReq.java
new file mode 100644
index 0000000..cc9259d
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletBinaryReq.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import org.apache.tsfile.utils.BytesUtils;
+import org.apache.tsfile.utils.PublicBAOS;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+
+/**
+ * V1 request carrying an already-serialized insert-node payload as opaque bytes;
+ * the payload is placed directly into the thrift {@code body}.
+ */
+public class PipeTransferTabletBinaryReq extends TPipeTransferReq {
+
+  // Opaque payload view; shares its content with the wire-level body.
+  protected transient ByteBuffer byteBuffer;
+
+  protected PipeTransferTabletBinaryReq() {
+    // Do nothing
+  }
+
+  /////////////////////////////// Thrift ///////////////////////////////
+
+  /** Wraps {@code byteBuffer} as a V1 TRANSFER_TABLET_BINARY request; no copy is made. */
+  public static PipeTransferTabletBinaryReq toTPipeTransferReq(final ByteBuffer byteBuffer) {
+    final PipeTransferTabletBinaryReq req = new PipeTransferTabletBinaryReq();
+    req.byteBuffer = byteBuffer;
+
+    req.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+    req.type = PipeRequestType.TRANSFER_TABLET_BINARY.getType();
+    req.body = byteBuffer;
+
+    return req;
+  }
+
+  /** Rebuilds the request from a received {@link TPipeTransferReq}; aliases its body. */
+  public static PipeTransferTabletBinaryReq fromTPipeTransferReq(
+      final TPipeTransferReq transferReq) {
+    final PipeTransferTabletBinaryReq binaryReq = new PipeTransferTabletBinaryReq();
+    binaryReq.byteBuffer = transferReq.body;
+
+    binaryReq.version = transferReq.version;
+    binaryReq.type = transferReq.type;
+    binaryReq.body = transferReq.body;
+
+    return binaryReq;
+  }
+
+  /////////////////////////////// Air Gap ///////////////////////////////
+
+  /** Serializes a version + type header followed by the raw payload bytes. */
+  public static byte[] toTPipeTransferBytes(final ByteBuffer byteBuffer) throws IOException {
+    try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+        final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+      ReadWriteIOUtils.write(IoTDBConnectorRequestVersion.VERSION_1.getVersion(), outputStream);
+      ReadWriteIOUtils.write(PipeRequestType.TRANSFER_TABLET_BINARY.getType(), outputStream);
+      return BytesUtils.concatByteArray(byteArrayOutputStream.toByteArray(), byteBuffer.array());
+    }
+  }
+
+  /////////////////////////////// Object ///////////////////////////////
+
+  @Override
+  public boolean equals(final Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
+    final PipeTransferTabletBinaryReq that = (PipeTransferTabletBinaryReq) obj;
+    // Objects.equals: byteBuffer/body may still be null on an instance built via
+    // the protected no-arg constructor, so avoid NPE-prone direct equals calls.
+    // Also keeps the style consistent with the sibling request classes.
+    return Objects.equals(byteBuffer, that.byteBuffer)
+        && version == that.version
+        && type == that.type
+        && Objects.equals(body, that.body);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(byteBuffer, version, type, body);
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletBinaryReqV2.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletBinaryReqV2.java
new file mode 100644
index 0000000..621b282
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletBinaryReqV2.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.tsfile.utils.PublicBAOS;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+
+/**
+ * V2 of the binary tablet request: same opaque insert-node payload as
+ * {@code PipeTransferTabletBinaryReq}, extended with the target database name.
+ */
+public class PipeTransferTabletBinaryReqV2 extends PipeTransferTabletBinaryReq {
+
+  private transient String dataBaseName;
+
+  protected PipeTransferTabletBinaryReqV2() {
+    // Do nothing
+  }
+
+  /////////////////////////////// Batch ///////////////////////////////
+
+  /**
+   * Builds a request for batching. NOTE(review): {@code body} is left unset here,
+   * unlike the Thrift factory below — presumably the enclosing batch request
+   * serializes the payload itself; confirm before using this on a direct wire path.
+   */
+  public static PipeTransferTabletBinaryReqV2 toTPipeTransferBinaryReq(
+      final ByteBuffer byteBuffer, final String dataBaseName) {
+    final PipeTransferTabletBinaryReqV2 req = new PipeTransferTabletBinaryReqV2();
+
+    req.byteBuffer = byteBuffer;
+    req.dataBaseName = dataBaseName;
+    req.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+    req.type = PipeRequestType.TRANSFER_TABLET_BINARY_V2.getType();
+
+    return req;
+  }
+
+  /////////////////////////////// Thrift ///////////////////////////////
+
+  /**
+   * Serializes payload and database name into {@code body} as
+   * [payload length][payload bytes][database name].
+   */
+  public static PipeTransferTabletBinaryReqV2 toTPipeTransferReq(
+      final ByteBuffer byteBuffer, final String dataBaseName) throws IOException {
+    final PipeTransferTabletBinaryReqV2 req = new PipeTransferTabletBinaryReqV2();
+    req.byteBuffer = byteBuffer;
+    req.dataBaseName = dataBaseName;
+
+    req.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+    req.type = PipeRequestType.TRANSFER_TABLET_BINARY_V2.getType();
+    try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+        final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+      ReadWriteIOUtils.write(byteBuffer.limit(), outputStream);
+      outputStream.write(byteBuffer.array(), 0, byteBuffer.limit());
+      ReadWriteIOUtils.write(dataBaseName, outputStream);
+      req.body = ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size());
+    }
+
+    return req;
+  }
+
+  /**
+   * Decodes a received request; must read fields in the exact order written by
+   * {@link #toTPipeTransferReq(ByteBuffer, String)}.
+   */
+  public static PipeTransferTabletBinaryReqV2 fromTPipeTransferReq(
+      final org.apache.iotdb.service.rpc.thrift.TPipeTransferReq transferReq) {
+    final PipeTransferTabletBinaryReqV2 binaryReq = new PipeTransferTabletBinaryReqV2();
+
+    final int length = ReadWriteIOUtils.readInt(transferReq.body);
+    final byte[] body = new byte[length];
+    transferReq.body.get(body);
+    binaryReq.byteBuffer = ByteBuffer.wrap(body);
+    binaryReq.dataBaseName = ReadWriteIOUtils.readString(transferReq.body);
+
+    binaryReq.version = transferReq.version;
+    binaryReq.type = transferReq.type;
+    binaryReq.body = transferReq.body;
+
+    return binaryReq;
+  }
+
+  /////////////////////////////// Air Gap ///////////////////////////////
+
+  /** Serializes header (version, type) plus the same payload layout as the Thrift body. */
+  public static byte[] toTPipeTransferBytes(final ByteBuffer byteBuffer, final String dataBaseName)
+      throws IOException {
+    try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+        final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+      ReadWriteIOUtils.write(IoTDBConnectorRequestVersion.VERSION_1.getVersion(), outputStream);
+      ReadWriteIOUtils.write(PipeRequestType.TRANSFER_TABLET_BINARY_V2.getType(), outputStream);
+      ReadWriteIOUtils.write(byteBuffer.limit(), outputStream);
+      outputStream.write(byteBuffer.array(), 0, byteBuffer.limit());
+      ReadWriteIOUtils.write(dataBaseName, outputStream);
+      return byteArrayOutputStream.toByteArray();
+    }
+  }
+
+  /////////////////////////////// Object ///////////////////////////////
+
+  // Relies on super.equals for byteBuffer/version/type/body; getClass() check in
+  // the superclass keeps this symmetric across the V1/V2 hierarchy.
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    if (!super.equals(o)) {
+      return false;
+    }
+    final PipeTransferTabletBinaryReqV2 that = (PipeTransferTabletBinaryReqV2) o;
+    return Objects.equals(dataBaseName, that.dataBaseName);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(super.hashCode(), dataBaseName);
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletRawReq.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletRawReq.java
new file mode 100644
index 0000000..b1b3fa9
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletRawReq.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import org.apache.tsfile.utils.PublicBAOS;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+import org.apache.tsfile.write.record.Tablet;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+
+/**
+ * V1 request carrying an in-memory {@code Tablet} plus its aligned flag; the
+ * body layout is [serialized tablet][isAligned].
+ */
+public class PipeTransferTabletRawReq extends TPipeTransferReq {
+
+  protected transient Tablet tablet;
+  protected transient boolean isAligned;
+
+  /** @return the carried tablet (may be null before a factory populated it) */
+  public Tablet getTablet() {
+    return tablet;
+  }
+
+  /** @return whether the tablet's device uses aligned timeseries */
+  public boolean getIsAligned() {
+    return isAligned;
+  }
+
+  /////////////////////////////// WriteBack & Batch ///////////////////////////////
+
+  /**
+   * Builds a request for write-back/batch use. NOTE(review): version/type are not
+   * set here, unlike the V2 counterpart's batch factory — confirm whether batch
+   * consumers rely on the thrift header fields.
+   */
+  public static PipeTransferTabletRawReq toTPipeTransferRawReq(
+      final Tablet tablet, final boolean isAligned) {
+    final PipeTransferTabletRawReq tabletReq = new PipeTransferTabletRawReq();
+
+    tabletReq.tablet = tablet;
+    tabletReq.isAligned = isAligned;
+
+    return tabletReq;
+  }
+
+  /////////////////////////////// Thrift ///////////////////////////////
+
+  /** Builds a fully-serialized V1 TRANSFER_TABLET_RAW request. */
+  public static PipeTransferTabletRawReq toTPipeTransferReq(
+      final Tablet tablet, final boolean isAligned) throws IOException {
+    PipeTransferTabletRawReq tabletReq = toTPipeTransferRawReq(tablet, isAligned);
+
+    tabletReq.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+    tabletReq.type = PipeRequestType.TRANSFER_TABLET_RAW.getType();
+    try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+        final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+      tablet.serialize(outputStream);
+      ReadWriteIOUtils.write(isAligned, outputStream);
+      tabletReq.body =
+          ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size());
+    }
+
+    return tabletReq;
+  }
+
+  /**
+   * Decodes a received request; reads fields in the exact order written by
+   * {@link #toTPipeTransferReq(Tablet, boolean)}.
+   */
+  public static PipeTransferTabletRawReq fromTPipeTransferReq(final TPipeTransferReq transferReq) {
+    final PipeTransferTabletRawReq tabletReq = new PipeTransferTabletRawReq();
+
+    tabletReq.tablet = Tablet.deserialize(transferReq.body);
+    tabletReq.isAligned = ReadWriteIOUtils.readBool(transferReq.body);
+
+    tabletReq.version = transferReq.version;
+    tabletReq.type = transferReq.type;
+    tabletReq.body = transferReq.body;
+
+    return tabletReq;
+  }
+
+  /////////////////////////////// Air Gap ///////////////////////////////
+
+  /** Serializes header (version, type) plus the same payload layout as the Thrift body. */
+  public static byte[] toTPipeTransferBytes(final Tablet tablet, final boolean isAligned)
+      throws IOException {
+    try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+        final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+      ReadWriteIOUtils.write(IoTDBConnectorRequestVersion.VERSION_1.getVersion(), outputStream);
+      ReadWriteIOUtils.write(PipeRequestType.TRANSFER_TABLET_RAW.getType(), outputStream);
+      tablet.serialize(outputStream);
+      ReadWriteIOUtils.write(isAligned, outputStream);
+      return byteArrayOutputStream.toByteArray();
+    }
+  }
+
+  /////////////////////////////// Object ///////////////////////////////
+
+  @Override
+  public boolean equals(final Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
+    final PipeTransferTabletRawReq that = (PipeTransferTabletRawReq) obj;
+    return Objects.equals(tablet, that.tablet)
+        && isAligned == that.isAligned
+        && version == that.version
+        && type == that.type
+        && Objects.equals(body, that.body);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(tablet, isAligned, version, type, body);
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletRawReqV2.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletRawReqV2.java
new file mode 100644
index 0000000..45f51df
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTabletRawReqV2.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import org.apache.tsfile.utils.PublicBAOS;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+import org.apache.tsfile.write.record.Tablet;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+
+/**
+ * V2 of the raw tablet request: adds the target database name; body layout is
+ * [serialized tablet][isAligned][database name].
+ */
+public class PipeTransferTabletRawReqV2 extends PipeTransferTabletRawReq {
+
+  protected transient String dataBaseName;
+
+  /////////////////////////////// WriteBack & Batch ///////////////////////////////
+
+  /**
+   * Builds a request for write-back/batch use. Unlike the V1 counterpart, this
+   * also sets version/type; {@code body} is still left unset here.
+   */
+  public static PipeTransferTabletRawReqV2 toTPipeTransferRawReq(
+      final Tablet tablet, final boolean isAligned, final String dataBaseName) {
+    final PipeTransferTabletRawReqV2 tabletReq = new PipeTransferTabletRawReqV2();
+
+    tabletReq.tablet = tablet;
+    tabletReq.isAligned = isAligned;
+    tabletReq.dataBaseName = dataBaseName;
+    tabletReq.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+    tabletReq.type = PipeRequestType.TRANSFER_TABLET_RAW_V2.getType();
+
+    return tabletReq;
+  }
+
+  /////////////////////////////// Thrift ///////////////////////////////
+
+  /** Builds a fully-serialized V2 TRANSFER_TABLET_RAW_V2 request. */
+  public static PipeTransferTabletRawReqV2 toTPipeTransferReq(
+      final Tablet tablet, final boolean isAligned, final String dataBaseName) throws IOException {
+    PipeTransferTabletRawReqV2 tabletReq = toTPipeTransferRawReq(tablet, isAligned, dataBaseName);
+
+    try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+        final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+      tablet.serialize(outputStream);
+      ReadWriteIOUtils.write(isAligned, outputStream);
+      ReadWriteIOUtils.write(dataBaseName, outputStream);
+      tabletReq.body =
+          ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size());
+    }
+
+    return tabletReq;
+  }
+
+  /**
+   * Decodes a received request; reads fields in the exact order written by
+   * {@link #toTPipeTransferReq(Tablet, boolean, String)}.
+   */
+  public static PipeTransferTabletRawReqV2 fromTPipeTransferReq(
+      final TPipeTransferReq transferReq) {
+    final PipeTransferTabletRawReqV2 tabletReq = new PipeTransferTabletRawReqV2();
+
+    tabletReq.tablet = Tablet.deserialize(transferReq.body);
+    tabletReq.isAligned = ReadWriteIOUtils.readBool(transferReq.body);
+    tabletReq.dataBaseName = ReadWriteIOUtils.readString(transferReq.body);
+
+    tabletReq.version = transferReq.version;
+    tabletReq.type = transferReq.type;
+    tabletReq.body = transferReq.body;
+
+    return tabletReq;
+  }
+
+  /////////////////////////////// Air Gap ///////////////////////////////
+
+  /** Serializes header (version, type) plus the same payload layout as the Thrift body. */
+  public static byte[] toTPipeTransferBytes(
+      final Tablet tablet, final boolean isAligned, final String dataBaseName) throws IOException {
+    try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
+        final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
+      ReadWriteIOUtils.write(IoTDBConnectorRequestVersion.VERSION_1.getVersion(), outputStream);
+      ReadWriteIOUtils.write(PipeRequestType.TRANSFER_TABLET_RAW_V2.getType(), outputStream);
+      tablet.serialize(outputStream);
+      ReadWriteIOUtils.write(isAligned, outputStream);
+      ReadWriteIOUtils.write(dataBaseName, outputStream);
+      return byteArrayOutputStream.toByteArray();
+    }
+  }
+
+  /////////////////////////////// Object ///////////////////////////////
+
+  // Delegates tablet/isAligned/version/type/body comparison to the superclass.
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    if (!super.equals(o)) {
+      return false;
+    }
+    final PipeTransferTabletRawReqV2 that = (PipeTransferTabletRawReqV2) o;
+    return Objects.equals(dataBaseName, that.dataBaseName);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(super.hashCode(), dataBaseName);
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTsFilePieceReq.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTsFilePieceReq.java
new file mode 100644
index 0000000..60afaec
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTsFilePieceReq.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import java.io.IOException;
+
+/** File-piece request for one chunk of a TsFile transferred without a mod file. */
+public class PipeTransferTsFilePieceReq extends PipeTransferFilePieceReq {
+
+  private PipeTransferTsFilePieceReq() {
+    // Instances are created only through the static factory methods below.
+  }
+
+  /** Identifies this request kind on the wire. */
+  @Override
+  protected PipeRequestType getPlanType() {
+    return PipeRequestType.TRANSFER_TS_FILE_PIECE;
+  }
+
+  /////////////////////////////// Thrift ///////////////////////////////
+
+  /** Builds a piece request for the given file chunk starting at {@code startWritingOffset}. */
+  public static PipeTransferTsFilePieceReq toTPipeTransferReq(
+      final String fileName, final long startWritingOffset, final byte[] filePiece)
+      throws IOException {
+    final PipeTransferTsFilePieceReq req = new PipeTransferTsFilePieceReq();
+    return (PipeTransferTsFilePieceReq)
+        req.convertToTPipeTransferReq(fileName, startWritingOffset, filePiece);
+  }
+
+  /** Decodes a received transfer request back into a piece request. */
+  public static PipeTransferTsFilePieceReq fromTPipeTransferReq(final TPipeTransferReq transferReq) {
+    final PipeTransferTsFilePieceReq req = new PipeTransferTsFilePieceReq();
+    return (PipeTransferTsFilePieceReq) req.translateFromTPipeTransferReq(transferReq);
+  }
+
+  /////////////////////////////// Air Gap ///////////////////////////////
+
+  /** Serializes the piece request to raw bytes for air-gap transfer. */
+  public static byte[] toTPipeTransferBytes(
+      final String fileName, final long startWritingOffset, final byte[] filePiece)
+      throws IOException {
+    final PipeTransferTsFilePieceReq req = new PipeTransferTsFilePieceReq();
+    return req.convertToTPipeTransferBytes(fileName, startWritingOffset, filePiece);
+  }
+
+  /////////////////////////////// Object ///////////////////////////////
+
+  @Override
+  public boolean equals(final Object obj) {
+    if (!(obj instanceof PipeTransferTsFilePieceReq)) {
+      return false;
+    }
+    return super.equals(obj);
+  }
+
+  @Override
+  public int hashCode() {
+    return super.hashCode();
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTsFilePieceWithModReq.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTsFilePieceWithModReq.java
new file mode 100644
index 0000000..3cb7996
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTsFilePieceWithModReq.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import java.io.IOException;
+
+/** File-piece request for one chunk of a TsFile that is accompanied by a mod file. */
+public class PipeTransferTsFilePieceWithModReq extends PipeTransferFilePieceReq {
+
+  private PipeTransferTsFilePieceWithModReq() {
+    // Instances are created only through the static factory methods below.
+  }
+
+  /** Identifies this request kind on the wire. */
+  @Override
+  protected PipeRequestType getPlanType() {
+    return PipeRequestType.TRANSFER_TS_FILE_PIECE_WITH_MOD;
+  }
+
+  /////////////////////////////// Thrift ///////////////////////////////
+
+  /** Builds a piece request for the given file chunk starting at {@code startWritingOffset}. */
+  public static PipeTransferTsFilePieceWithModReq toTPipeTransferReq(
+      final String fileName, final long startWritingOffset, final byte[] filePiece)
+      throws IOException {
+    final PipeTransferTsFilePieceWithModReq req = new PipeTransferTsFilePieceWithModReq();
+    return (PipeTransferTsFilePieceWithModReq)
+        req.convertToTPipeTransferReq(fileName, startWritingOffset, filePiece);
+  }
+
+  /** Decodes a received transfer request back into a piece request. */
+  public static PipeTransferTsFilePieceWithModReq fromTPipeTransferReq(
+      final TPipeTransferReq transferReq) {
+    final PipeTransferTsFilePieceWithModReq req = new PipeTransferTsFilePieceWithModReq();
+    return (PipeTransferTsFilePieceWithModReq) req.translateFromTPipeTransferReq(transferReq);
+  }
+
+  /////////////////////////////// Air Gap ///////////////////////////////
+
+  /** Serializes the piece request to raw bytes for air-gap transfer. */
+  public static byte[] toTPipeTransferBytes(
+      final String fileName, final long startWritingOffset, final byte[] filePiece)
+      throws IOException {
+    final PipeTransferTsFilePieceWithModReq req = new PipeTransferTsFilePieceWithModReq();
+    return req.convertToTPipeTransferBytes(fileName, startWritingOffset, filePiece);
+  }
+
+  /////////////////////////////// Object ///////////////////////////////
+
+  @Override
+  public boolean equals(final Object obj) {
+    if (!(obj instanceof PipeTransferTsFilePieceWithModReq)) {
+      return false;
+    }
+    return super.equals(obj);
+  }
+
+  @Override
+  public int hashCode() {
+    return super.hashCode();
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTsFileSealWithModReq.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTsFileSealWithModReq.java
new file mode 100644
index 0000000..b6e15dc
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/request/PipeTransferTsFileSealWithModReq.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+
+public class PipeTransferTsFileSealWithModReq extends PipeTransferFileSealReqV2 {
+
+  /** Prefix of the per-file parameter key that carries a database name. */
+  protected static final String DATABASE_NAME_KEY_PREFIX = "DATABASE_NAME_";
+
+  /** Instances are created only through the static factories below. */
+  private PipeTransferTsFileSealWithModReq() {
+    // Empty constructor
+  }
+
+  @Override
+  protected PipeRequestType getPlanType() {
+    return PipeRequestType.TRANSFER_TS_FILE_SEAL_WITH_MOD;
+  }
+
+  /** Derives the parameter key under which the database name of {@code fileName} is stored. */
+  protected static String generateDatabaseNameWithFileNameKey(final String fileName) {
+    return DATABASE_NAME_KEY_PREFIX + fileName;
+  }
+
+  /////////////////////////////// Thrift ///////////////////////////////
+
+  /** Seals a mod-file/tsfile pair; the database name is left unspecified (null). */
+  public static PipeTransferTsFileSealWithModReq toTPipeTransferReq(
+      final String modFileName,
+      final long modFileLength,
+      final String tsFileName,
+      final long tsFileLength)
+      throws IOException {
+    return toTPipeTransferReq(modFileName, modFileLength, tsFileName, tsFileLength, null);
+  }
+
+  /** Seals a mod-file/tsfile pair, attaching {@code dataBaseName} to the tsfile entry. */
+  public static PipeTransferTsFileSealWithModReq toTPipeTransferReq(
+      final String modFileName,
+      final long modFileLength,
+      final String tsFileName,
+      final long tsFileLength,
+      final String dataBaseName)
+      throws IOException {
+    final PipeTransferTsFileSealWithModReq emptyReq = new PipeTransferTsFileSealWithModReq();
+    // The mod file always precedes the tsfile in the name/length lists.
+    return (PipeTransferTsFileSealWithModReq)
+        emptyReq.convertToTPipeTransferReq(
+            Arrays.asList(modFileName, tsFileName),
+            Arrays.asList(modFileLength, tsFileLength),
+            Collections.singletonMap(
+                generateDatabaseNameWithFileNameKey(tsFileName), dataBaseName));
+  }
+
+  /** Seals a single tsfile (no mod file), attaching {@code dataBaseName} to it. */
+  public static PipeTransferTsFileSealWithModReq toTPipeTransferReq(
+      final String tsFileName, final long tsFileLength, final String dataBaseName)
+      throws IOException {
+    final PipeTransferTsFileSealWithModReq emptyReq = new PipeTransferTsFileSealWithModReq();
+    return (PipeTransferTsFileSealWithModReq)
+        emptyReq.convertToTPipeTransferReq(
+            Collections.singletonList(tsFileName),
+            Collections.singletonList(tsFileLength),
+            Collections.singletonMap(
+                generateDatabaseNameWithFileNameKey(tsFileName), dataBaseName));
+  }
+
+  /** Re-interprets a generic transfer request as a tsfile-seal-with-mod request. */
+  public static PipeTransferTsFileSealWithModReq fromTPipeTransferReq(final TPipeTransferReq req) {
+    final PipeTransferTsFileSealWithModReq emptyReq = new PipeTransferTsFileSealWithModReq();
+    return (PipeTransferTsFileSealWithModReq) emptyReq.translateFromTPipeTransferReq(req);
+  }
+
+  /////////////////////////////// Air Gap ///////////////////////////////
+
+  /** Serializes a mod-file/tsfile seal into raw bytes; no database-name entry is attached. */
+  public static byte[] toTPipeTransferBytes(
+      final String modFileName,
+      final long modFileLength,
+      final String tsFileName,
+      final long tsFileLength)
+      throws IOException {
+    return new PipeTransferTsFileSealWithModReq()
+        .convertToTPipeTransferSnapshotSealBytes(
+            Arrays.asList(modFileName, tsFileName),
+            Arrays.asList(modFileLength, tsFileLength),
+            new HashMap<>());
+  }
+
+  /** Serializes a mod-file/tsfile seal into raw bytes with {@code dataBaseName} attached. */
+  public static byte[] toTPipeTransferBytes(
+      final String modFileName,
+      final long modFileLength,
+      final String tsFileName,
+      final long tsFileLength,
+      final String dataBaseName)
+      throws IOException {
+    return new PipeTransferTsFileSealWithModReq()
+        .convertToTPipeTransferSnapshotSealBytes(
+            Arrays.asList(modFileName, tsFileName),
+            Arrays.asList(modFileLength, tsFileLength),
+            Collections.singletonMap(
+                generateDatabaseNameWithFileNameKey(tsFileName), dataBaseName));
+  }
+
+  /** Serializes a single-tsfile seal into raw bytes with {@code dataBaseName} attached. */
+  public static byte[] toTPipeTransferBytes(
+      final String tsFileName, final long tsFileLength, final String dataBaseName)
+      throws IOException {
+    return new PipeTransferTsFileSealWithModReq()
+        .convertToTPipeTransferSnapshotSealBytes(
+            Collections.singletonList(tsFileName),
+            Collections.singletonList(tsFileLength),
+            Collections.singletonMap(
+                generateDatabaseNameWithFileNameKey(tsFileName), dataBaseName));
+  }
+
+  /////////////////////////////// Object ///////////////////////////////
+
+  @Override
+  public boolean equals(final Object obj) {
+    // instanceof (not getClass) is kept to match the superclass' equality contract.
+    return obj instanceof PipeTransferTsFileSealWithModReq && super.equals(obj);
+  }
+
+  @Override
+  public int hashCode() {
+    return super.hashCode();
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/response/PipeTransferFilePieceResp.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/response/PipeTransferFilePieceResp.java
new file mode 100644
index 0000000..d2b6a9e
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/payload/thrift/response/PipeTransferFilePieceResp.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.response;
+
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferResp;
+
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.util.Objects;
+
+/**
+ * Response to a file-piece transfer request. Besides the wrapped {@link TPipeTransferResp}
+ * status, it exposes the offset the receiver has written up to, parsed from the response body.
+ */
+public class PipeTransferFilePieceResp extends TPipeTransferResp {
+
+  // Offset the receiver ends at after writing this piece; 0 until a body is parsed.
+  private long endWritingOffset;
+
+  /** Instances are created only through {@link #fromTPipeTransferResp}. */
+  private PipeTransferFilePieceResp() {
+    // Empty constructor
+  }
+
+  public long getEndWritingOffset() {
+    return endWritingOffset;
+  }
+
+  /////////////////////////////// Thrift ///////////////////////////////
+
+  /**
+   * Parses a generic transfer response into a file-piece response.
+   *
+   * <p>The body (and thus {@code endWritingOffset}) is only populated when the response
+   * actually carries one; otherwise {@code body} stays null.
+   */
+  public static PipeTransferFilePieceResp fromTPipeTransferResp(TPipeTransferResp transferResp) {
+    final PipeTransferFilePieceResp filePieceResp = new PipeTransferFilePieceResp();
+
+    filePieceResp.status = transferResp.status;
+
+    if (transferResp.isSetBody()) {
+      filePieceResp.endWritingOffset = ReadWriteIOUtils.readLong(transferResp.body);
+      filePieceResp.body = transferResp.body;
+    }
+
+    return filePieceResp;
+  }
+
+  /////////////////////////////// Object ///////////////////////////////
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
+    PipeTransferFilePieceResp that = (PipeTransferFilePieceResp) obj;
+    // Objects.equals: body (and potentially status) may be null when no body was set,
+    // so direct .equals() calls here could throw NullPointerException.
+    return endWritingOffset == that.endWritingOffset
+        && Objects.equals(status, that.status)
+        && Objects.equals(body, that.body);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(endWritingOffset, status, body);
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/IoTDBConnector.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/IoTDBConnector.java
new file mode 100644
index 0000000..d358ae8
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/IoTDBConnector.java
@@ -0,0 +1,472 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.protocol;
+
+import org.apache.iotdb.collector.plugin.builtin.sink.compressor.PipeCompressor;
+import org.apache.iotdb.collector.plugin.builtin.sink.compressor.PipeCompressorConfig;
+import org.apache.iotdb.collector.plugin.builtin.sink.compressor.PipeCompressorFactory;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request.PipeTransferCompressedReq;
+import org.apache.iotdb.common.rpc.thrift.TEndPoint;
+import org.apache.iotdb.pipe.api.PipeSink;
+import org.apache.iotdb.pipe.api.customizer.configuration.PipeSinkRuntimeConfiguration;
+import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator;
+import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
+import org.apache.iotdb.pipe.api.exception.PipeParameterNotValidException;
+import org.apache.iotdb.rpc.UrlUtils;
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_SET;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_ZSTD_LEVEL_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_ZSTD_LEVEL_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_ZSTD_LEVEL_MAX_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_ZSTD_LEVEL_MIN_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_EXCEPTION_CONFLICT_RECORD_IGNORED_DATA_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_EXCEPTION_CONFLICT_RECORD_IGNORED_DATA_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_EXCEPTION_CONFLICT_RESOLVE_STRATEGY_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_EXCEPTION_CONFLICT_RESOLVE_STRATEGY_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_EXCEPTION_CONFLICT_RETRY_MAX_TIME_SECONDS_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_EXCEPTION_CONFLICT_RETRY_MAX_TIME_SECONDS_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_EXCEPTION_DATA_CONVERT_ON_TYPE_MISMATCH_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_EXCEPTION_DATA_CONVERT_ON_TYPE_MISMATCH_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_EXCEPTION_OTHERS_RECORD_IGNORED_DATA_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_EXCEPTION_OTHERS_RECORD_IGNORED_DATA_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_EXCEPTION_OTHERS_RETRY_MAX_TIME_SECONDS_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_EXCEPTION_OTHERS_RETRY_MAX_TIME_SECONDS_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_FORMAT_HYBRID_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_FORMAT_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_FORMAT_TABLET_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_FORMAT_TS_FILE_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_BATCH_MODE_ENABLE_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_BATCH_MODE_ENABLE_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_BATCH_SIZE_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_HOST_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_IP_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_NODE_URLS_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_PASSWORD_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_PASSWORD_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_PLAIN_BATCH_SIZE_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_PORT_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_USERNAME_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_USER_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_IOTDB_USER_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_LOAD_BALANCE_ROUND_ROBIN_STRATEGY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_LOAD_BALANCE_STRATEGY_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_LOAD_BALANCE_STRATEGY_SET;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_LOAD_TSFILE_STRATEGY_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_LOAD_TSFILE_STRATEGY_SET;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_LOAD_TSFILE_STRATEGY_SYNC_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_LOAD_TSFILE_VALIDATION_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_LOAD_TSFILE_VALIDATION_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_MARK_AS_PIPE_REQUEST_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_MARK_AS_PIPE_REQUEST_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_COMPRESSOR_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_COMPRESSOR_ZSTD_LEVEL_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_EXCEPTION_CONFLICT_RECORD_IGNORED_DATA_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_EXCEPTION_CONFLICT_RESOLVE_STRATEGY_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_EXCEPTION_CONFLICT_RETRY_MAX_TIME_SECONDS_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_EXCEPTION_DATA_CONVERT_ON_TYPE_MISMATCH_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_EXCEPTION_OTHERS_RECORD_IGNORED_DATA_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_EXCEPTION_OTHERS_RETRY_MAX_TIME_SECONDS_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_FORMAT_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_IOTDB_BATCH_MODE_ENABLE_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_IOTDB_BATCH_SIZE_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_IOTDB_HOST_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_IOTDB_IP_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_IOTDB_NODE_URLS_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_IOTDB_PASSWORD_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_IOTDB_PORT_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_IOTDB_USERNAME_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_IOTDB_USER_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_LOAD_BALANCE_STRATEGY_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_LOAD_TSFILE_STRATEGY_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_LOAD_TSFILE_VALIDATION_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_MARK_AS_PIPE_REQUEST_KEY;
+
+/**
+ * Base class for IoTDB sinks. Parses and validates the connection parameters shared by all
+ * IoTDB connectors: target node urls, credentials, load-balance / tsfile-load strategies,
+ * rpc compression, batching flags and receiver exception-handling behavior.
+ *
+ * <p>Note: the collection generic type arguments ({@code TEndPoint}, {@code PipeCompressor},
+ * {@code String}) are restored here — with raw types, iterating the node-url set as
+ * {@code TEndPoint} in {@link #checkNodeUrls} would not compile.
+ */
+public abstract class IoTDBConnector implements PipeSink {
+
+  private static final String PARSE_URL_ERROR_FORMATTER =
+      "Exception occurred while parsing node urls from target servers: {}";
+  private static final String PARSE_URL_ERROR_MESSAGE =
+      "Error occurred while parsing node urls from target servers, please check the specified 'host':'port' or 'node-urls'";
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBConnector.class);
+
+  // De-duplicated, insertion-ordered target endpoints, populated in customize().
+  protected final List<TEndPoint> nodeUrls = new ArrayList<>();
+
+  protected String username = CONNECTOR_IOTDB_USER_DEFAULT_VALUE;
+  protected String password = CONNECTOR_IOTDB_PASSWORD_DEFAULT_VALUE;
+
+  protected String loadBalanceStrategy;
+
+  protected String loadTsFileStrategy;
+  protected boolean loadTsFileValidation;
+
+  protected boolean shouldMarkAsPipeRequest;
+
+  // Compression is enabled iff at least one compressor was configured in validate().
+  private boolean isRpcCompressionEnabled;
+  private final List<PipeCompressor> compressors = new ArrayList<>();
+
+  protected boolean isTabletBatchModeEnabled = true;
+
+  protected PipeReceiverStatusHandler receiverStatusHandler;
+  protected boolean shouldReceiverConvertOnTypeMismatch =
+      CONNECTOR_EXCEPTION_DATA_CONVERT_ON_TYPE_MISMATCH_DEFAULT_VALUE;
+
+  /**
+   * Validates connection parameters and pre-computes credentials, strategies and compressors.
+   *
+   * @throws Exception if any parameter fails validation
+   */
+  @Override
+  public void validate(final PipeParameterValidator validator) throws Exception {
+    final PipeParameters parameters = validator.getParameters();
+
+    // At least one way of addressing the target must be present:
+    // node-urls, ip:port or host:port, in either connector- or sink-prefixed form.
+    validator.validate(
+        args ->
+            (boolean) args[0]
+                || (((boolean) args[1] || (boolean) args[2]) && (boolean) args[3])
+                || (boolean) args[4]
+                || (((boolean) args[5] || (boolean) args[6]) && (boolean) args[7]),
+        String.format(
+            "One of %s, %s:%s, %s, %s:%s must be specified",
+            CONNECTOR_IOTDB_NODE_URLS_KEY,
+            CONNECTOR_IOTDB_HOST_KEY,
+            CONNECTOR_IOTDB_PORT_KEY,
+            SINK_IOTDB_NODE_URLS_KEY,
+            SINK_IOTDB_HOST_KEY,
+            SINK_IOTDB_PORT_KEY),
+        parameters.hasAttribute(CONNECTOR_IOTDB_NODE_URLS_KEY),
+        parameters.hasAttribute(CONNECTOR_IOTDB_IP_KEY),
+        parameters.hasAttribute(CONNECTOR_IOTDB_HOST_KEY),
+        parameters.hasAttribute(CONNECTOR_IOTDB_PORT_KEY),
+        parameters.hasAttribute(SINK_IOTDB_NODE_URLS_KEY),
+        parameters.hasAttribute(SINK_IOTDB_IP_KEY),
+        parameters.hasAttribute(SINK_IOTDB_HOST_KEY),
+        parameters.hasAttribute(SINK_IOTDB_PORT_KEY));
+
+    validator.validate(
+        requestMaxBatchSizeInBytes -> (long) requestMaxBatchSizeInBytes > 0,
+        String.format(
+            "%s must be > 0, but got %s",
+            SINK_IOTDB_BATCH_SIZE_KEY,
+            parameters.getLongOrDefault(
+                Arrays.asList(CONNECTOR_IOTDB_BATCH_SIZE_KEY, SINK_IOTDB_BATCH_SIZE_KEY),
+                CONNECTOR_IOTDB_PLAIN_BATCH_SIZE_DEFAULT_VALUE)),
+        parameters.getLongOrDefault(
+            Arrays.asList(CONNECTOR_IOTDB_BATCH_SIZE_KEY, SINK_IOTDB_BATCH_SIZE_KEY),
+            CONNECTOR_IOTDB_PLAIN_BATCH_SIZE_DEFAULT_VALUE));
+
+    // Check coexistence of user and username
+    validator.validateSynonymAttributes(
+        Arrays.asList(CONNECTOR_IOTDB_USER_KEY, SINK_IOTDB_USER_KEY),
+        Arrays.asList(CONNECTOR_IOTDB_USERNAME_KEY, SINK_IOTDB_USERNAME_KEY),
+        false);
+
+    username =
+        parameters.getStringOrDefault(
+            Arrays.asList(
+                CONNECTOR_IOTDB_USER_KEY,
+                SINK_IOTDB_USER_KEY,
+                CONNECTOR_IOTDB_USERNAME_KEY,
+                SINK_IOTDB_USERNAME_KEY),
+            CONNECTOR_IOTDB_USER_DEFAULT_VALUE);
+    password =
+        parameters.getStringOrDefault(
+            Arrays.asList(CONNECTOR_IOTDB_PASSWORD_KEY, SINK_IOTDB_PASSWORD_KEY),
+            CONNECTOR_IOTDB_PASSWORD_DEFAULT_VALUE);
+
+    loadBalanceStrategy =
+        parameters
+            .getStringOrDefault(
+                Arrays.asList(CONNECTOR_LOAD_BALANCE_STRATEGY_KEY, SINK_LOAD_BALANCE_STRATEGY_KEY),
+                CONNECTOR_LOAD_BALANCE_ROUND_ROBIN_STRATEGY)
+            .trim()
+            .toLowerCase();
+    validator.validate(
+        arg -> CONNECTOR_LOAD_BALANCE_STRATEGY_SET.contains(loadBalanceStrategy),
+        String.format(
+            "Load balance strategy should be one of %s, but got %s.",
+            CONNECTOR_LOAD_BALANCE_STRATEGY_SET, loadBalanceStrategy),
+        loadBalanceStrategy);
+
+    loadTsFileStrategy =
+        parameters
+            .getStringOrDefault(
+                Arrays.asList(CONNECTOR_LOAD_TSFILE_STRATEGY_KEY, SINK_LOAD_TSFILE_STRATEGY_KEY),
+                CONNECTOR_LOAD_TSFILE_STRATEGY_SYNC_VALUE)
+            .trim()
+            .toLowerCase();
+    validator.validate(
+        arg -> CONNECTOR_LOAD_TSFILE_STRATEGY_SET.contains(loadTsFileStrategy),
+        String.format(
+            "Load tsfile strategy should be one of %s, but got %s.",
+            CONNECTOR_LOAD_TSFILE_STRATEGY_SET, loadTsFileStrategy),
+        loadTsFileStrategy);
+    loadTsFileValidation =
+        parameters.getBooleanOrDefault(
+            Arrays.asList(CONNECTOR_LOAD_TSFILE_VALIDATION_KEY, SINK_LOAD_TSFILE_VALIDATION_KEY),
+            CONNECTOR_LOAD_TSFILE_VALIDATION_DEFAULT_VALUE);
+
+    final int zstdCompressionLevel =
+        parameters.getIntOrDefault(
+            Arrays.asList(CONNECTOR_COMPRESSOR_ZSTD_LEVEL_KEY, SINK_COMPRESSOR_ZSTD_LEVEL_KEY),
+            CONNECTOR_COMPRESSOR_ZSTD_LEVEL_DEFAULT_VALUE);
+    validator.validate(
+        arg ->
+            (int) arg >= CONNECTOR_COMPRESSOR_ZSTD_LEVEL_MIN_VALUE
+                && (int) arg <= CONNECTOR_COMPRESSOR_ZSTD_LEVEL_MAX_VALUE,
+        String.format(
+            "Zstd compression level should be in the range [%d, %d], but got %d.",
+            CONNECTOR_COMPRESSOR_ZSTD_LEVEL_MIN_VALUE,
+            CONNECTOR_COMPRESSOR_ZSTD_LEVEL_MAX_VALUE,
+            zstdCompressionLevel),
+        zstdCompressionLevel);
+
+    // "compressor" is a comma-separated list; empty entries are skipped silently.
+    final String compressionTypes =
+        parameters
+            .getStringOrDefault(
+                Arrays.asList(CONNECTOR_COMPRESSOR_KEY, SINK_COMPRESSOR_KEY),
+                CONNECTOR_COMPRESSOR_DEFAULT_VALUE)
+            .toLowerCase();
+    if (!compressionTypes.isEmpty()) {
+      for (final String compressionType : compressionTypes.split(",")) {
+        final String trimmedCompressionType = compressionType.trim();
+        if (trimmedCompressionType.isEmpty()) {
+          continue;
+        }
+
+        validator.validate(
+            arg -> CONNECTOR_COMPRESSOR_SET.contains(trimmedCompressionType),
+            String.format(
+                "Compressor should be one of %s, but got %s.",
+                CONNECTOR_COMPRESSOR_SET, trimmedCompressionType),
+            trimmedCompressionType);
+        compressors.add(
+            PipeCompressorFactory.getCompressor(
+                new PipeCompressorConfig(trimmedCompressionType, zstdCompressionLevel)));
+      }
+    }
+    // The compressor count is serialized into a single byte by the compressed request format.
+    validator.validate(
+        arg -> compressors.size() <= Byte.MAX_VALUE,
+        String.format(
+            "The number of compressors should be less than or equal to %d, but got %d.",
+            Byte.MAX_VALUE, compressors.size()),
+        compressors.size());
+    isRpcCompressionEnabled = !compressors.isEmpty();
+
+    validator.validate(
+        arg -> arg.equals("retry") || arg.equals("ignore"),
+        String.format(
+            "The value of key %s or %s must be either 'retry' or 'ignore'.",
+            CONNECTOR_EXCEPTION_CONFLICT_RESOLVE_STRATEGY_KEY,
+            SINK_EXCEPTION_CONFLICT_RESOLVE_STRATEGY_KEY),
+        parameters
+            .getStringOrDefault(
+                Arrays.asList(
+                    CONNECTOR_EXCEPTION_CONFLICT_RESOLVE_STRATEGY_KEY,
+                    SINK_EXCEPTION_CONFLICT_RESOLVE_STRATEGY_KEY),
+                CONNECTOR_EXCEPTION_CONFLICT_RESOLVE_STRATEGY_DEFAULT_VALUE)
+            .trim()
+            .toLowerCase());
+
+    validator.validateAttributeValueRange(
+        validator.getParameters().hasAttribute(CONNECTOR_FORMAT_KEY)
+            ? CONNECTOR_FORMAT_KEY
+            : SINK_FORMAT_KEY,
+        true,
+        CONNECTOR_FORMAT_TABLET_VALUE,
+        CONNECTOR_FORMAT_HYBRID_VALUE,
+        CONNECTOR_FORMAT_TS_FILE_VALUE);
+  }
+
+  /**
+   * Applies the validated parameters: resolves node urls, batching flags and the receiver
+   * status handler.
+   *
+   * @throws Exception if node urls cannot be parsed
+   */
+  @Override
+  public void customize(
+      final PipeParameters parameters, final PipeSinkRuntimeConfiguration configuration)
+      throws Exception {
+    nodeUrls.clear();
+    nodeUrls.addAll(parseNodeUrls(parameters));
+    LOGGER.info("IoTDBConnector nodeUrls: {}", nodeUrls);
+
+    // Batch mode is forced on when the sink format is tsfile.
+    isTabletBatchModeEnabled =
+        parameters.getBooleanOrDefault(
+                Arrays.asList(
+                    CONNECTOR_IOTDB_BATCH_MODE_ENABLE_KEY, SINK_IOTDB_BATCH_MODE_ENABLE_KEY),
+                CONNECTOR_IOTDB_BATCH_MODE_ENABLE_DEFAULT_VALUE)
+            || parameters
+                .getStringOrDefault(
+                    Arrays.asList(CONNECTOR_FORMAT_KEY, SINK_FORMAT_KEY),
+                    CONNECTOR_FORMAT_HYBRID_VALUE)
+                .equals(CONNECTOR_FORMAT_TS_FILE_VALUE);
+    LOGGER.info("IoTDBConnector isTabletBatchModeEnabled: {}", isTabletBatchModeEnabled);
+
+    shouldMarkAsPipeRequest =
+        parameters.getBooleanOrDefault(
+            Arrays.asList(CONNECTOR_MARK_AS_PIPE_REQUEST_KEY, SINK_MARK_AS_PIPE_REQUEST_KEY),
+            CONNECTOR_MARK_AS_PIPE_REQUEST_DEFAULT_VALUE);
+    LOGGER.info("IoTDBConnector shouldMarkAsPipeRequest: {}", shouldMarkAsPipeRequest);
+
+    receiverStatusHandler =
+        new PipeReceiverStatusHandler(
+            parameters
+                .getStringOrDefault(
+                    Arrays.asList(
+                        CONNECTOR_EXCEPTION_CONFLICT_RESOLVE_STRATEGY_KEY,
+                        SINK_EXCEPTION_CONFLICT_RESOLVE_STRATEGY_KEY),
+                    CONNECTOR_EXCEPTION_CONFLICT_RESOLVE_STRATEGY_DEFAULT_VALUE)
+                .trim()
+                .equalsIgnoreCase("retry"),
+            parameters.getLongOrDefault(
+                Arrays.asList(
+                    CONNECTOR_EXCEPTION_CONFLICT_RETRY_MAX_TIME_SECONDS_KEY,
+                    SINK_EXCEPTION_CONFLICT_RETRY_MAX_TIME_SECONDS_KEY),
+                CONNECTOR_EXCEPTION_CONFLICT_RETRY_MAX_TIME_SECONDS_DEFAULT_VALUE),
+            parameters.getBooleanOrDefault(
+                Arrays.asList(
+                    CONNECTOR_EXCEPTION_CONFLICT_RECORD_IGNORED_DATA_KEY,
+                    SINK_EXCEPTION_CONFLICT_RECORD_IGNORED_DATA_KEY),
+                CONNECTOR_EXCEPTION_CONFLICT_RECORD_IGNORED_DATA_DEFAULT_VALUE),
+            parameters.getLongOrDefault(
+                Arrays.asList(
+                    CONNECTOR_EXCEPTION_OTHERS_RETRY_MAX_TIME_SECONDS_KEY,
+                    SINK_EXCEPTION_OTHERS_RETRY_MAX_TIME_SECONDS_KEY),
+                CONNECTOR_EXCEPTION_OTHERS_RETRY_MAX_TIME_SECONDS_DEFAULT_VALUE),
+            parameters.getBooleanOrDefault(
+                Arrays.asList(
+                    CONNECTOR_EXCEPTION_OTHERS_RECORD_IGNORED_DATA_KEY,
+                    SINK_EXCEPTION_OTHERS_RECORD_IGNORED_DATA_KEY),
+                CONNECTOR_EXCEPTION_OTHERS_RECORD_IGNORED_DATA_DEFAULT_VALUE));
+
+    shouldReceiverConvertOnTypeMismatch =
+        parameters.getBooleanOrDefault(
+            Arrays.asList(
+                CONNECTOR_EXCEPTION_DATA_CONVERT_ON_TYPE_MISMATCH_KEY,
+                SINK_EXCEPTION_DATA_CONVERT_ON_TYPE_MISMATCH_KEY),
+            CONNECTOR_EXCEPTION_DATA_CONVERT_ON_TYPE_MISMATCH_DEFAULT_VALUE);
+    LOGGER.info(
+        "IoTDBConnector {} = {}",
+        CONNECTOR_EXCEPTION_DATA_CONVERT_ON_TYPE_MISMATCH_KEY,
+        shouldReceiverConvertOnTypeMismatch);
+  }
+
+  /**
+   * Collects target endpoints from all supported parameter spellings (ip/host/port pairs and
+   * node-url lists, connector- and sink-prefixed), preserving insertion order and de-duplicating.
+   *
+   * @throws PipeParameterNotValidException if any url is malformed or incomplete
+   */
+  protected LinkedHashSet<TEndPoint> parseNodeUrls(final PipeParameters parameters)
+      throws PipeParameterNotValidException {
+    final LinkedHashSet<TEndPoint> givenNodeUrls = new LinkedHashSet<>(nodeUrls);
+
+    try {
+      if (parameters.hasAttribute(CONNECTOR_IOTDB_IP_KEY)
+          && parameters.hasAttribute(CONNECTOR_IOTDB_PORT_KEY)) {
+        givenNodeUrls.add(
+            new TEndPoint(
+                parameters.getStringByKeys(CONNECTOR_IOTDB_IP_KEY),
+                parameters.getIntByKeys(CONNECTOR_IOTDB_PORT_KEY)));
+      }
+
+      if (parameters.hasAttribute(SINK_IOTDB_IP_KEY)
+          && parameters.hasAttribute(SINK_IOTDB_PORT_KEY)) {
+        givenNodeUrls.add(
+            new TEndPoint(
+                parameters.getStringByKeys(SINK_IOTDB_IP_KEY),
+                parameters.getIntByKeys(SINK_IOTDB_PORT_KEY)));
+      }
+
+      if (parameters.hasAttribute(CONNECTOR_IOTDB_HOST_KEY)
+          && parameters.hasAttribute(CONNECTOR_IOTDB_PORT_KEY)) {
+        givenNodeUrls.add(
+            new TEndPoint(
+                parameters.getStringByKeys(CONNECTOR_IOTDB_HOST_KEY),
+                parameters.getIntByKeys(CONNECTOR_IOTDB_PORT_KEY)));
+      }
+
+      if (parameters.hasAttribute(SINK_IOTDB_HOST_KEY)
+          && parameters.hasAttribute(SINK_IOTDB_PORT_KEY)) {
+        givenNodeUrls.add(
+            new TEndPoint(
+                parameters.getStringByKeys(SINK_IOTDB_HOST_KEY),
+                parameters.getIntByKeys(SINK_IOTDB_PORT_KEY)));
+      }
+
+      if (parameters.hasAttribute(CONNECTOR_IOTDB_NODE_URLS_KEY)) {
+        givenNodeUrls.addAll(
+            parseTEndPointUrls(
+                Arrays.asList(
+                    parameters
+                        .getStringByKeys(CONNECTOR_IOTDB_NODE_URLS_KEY)
+                        .replace(" ", "")
+                        .split(","))));
+      }
+
+      if (parameters.hasAttribute(SINK_IOTDB_NODE_URLS_KEY)) {
+        givenNodeUrls.addAll(
+            parseTEndPointUrls(
+                Arrays.asList(
+                    parameters
+                        .getStringByKeys(SINK_IOTDB_NODE_URLS_KEY)
+                        .replace(" ", "")
+                        .split(","))));
+      }
+    } catch (final Exception e) {
+      LOGGER.warn(PARSE_URL_ERROR_FORMATTER, e.toString());
+      throw new PipeParameterNotValidException(PARSE_URL_ERROR_MESSAGE);
+    }
+
+    checkNodeUrls(givenNodeUrls);
+
+    return givenNodeUrls;
+  }
+
+  /**
+   * Parses "ip:port" strings (ipv4 or ipv6) into endpoints. Any failure — including a null
+   * list — is surfaced as an exception and converted by the caller into a
+   * {@link PipeParameterNotValidException}.
+   */
+  private static List<TEndPoint> parseTEndPointUrls(final List<String> endPointUrls) {
+    if (endPointUrls == null) {
+      throw new NumberFormatException("endPointUrls is null");
+    }
+    final List<TEndPoint> result = new ArrayList<>(endPointUrls.size());
+    for (final String url : endPointUrls) {
+      result.add(UrlUtils.parseTEndPointIpv4AndIpv6Url(url));
+    }
+    return result;
+  }
+
+  /** Rejects endpoints with an empty host or an unset (0) port. */
+  private static void checkNodeUrls(final Set<TEndPoint> nodeUrls)
+      throws PipeParameterNotValidException {
+    for (final TEndPoint nodeUrl : nodeUrls) {
+      if (Objects.isNull(nodeUrl.ip) || nodeUrl.ip.isEmpty()) {
+        LOGGER.warn(PARSE_URL_ERROR_FORMATTER, "host cannot be empty");
+        throw new PipeParameterNotValidException(PARSE_URL_ERROR_MESSAGE);
+      }
+      if (nodeUrl.port == 0) {
+        LOGGER.warn(PARSE_URL_ERROR_FORMATTER, "port cannot be empty");
+        throw new PipeParameterNotValidException(PARSE_URL_ERROR_MESSAGE);
+      }
+    }
+  }
+
+  /** Wraps the request in a compressed envelope iff compression was configured. */
+  protected TPipeTransferReq compressIfNeeded(final TPipeTransferReq req) throws IOException {
+    return isRpcCompressionEnabled
+        ? PipeTransferCompressedReq.toTPipeTransferReq(req, compressors)
+        : req;
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/IoTDBDataRegionSyncConnector.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/IoTDBDataRegionSyncConnector.java
new file mode 100644
index 0000000..cfd7457
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/IoTDBDataRegionSyncConnector.java
@@ -0,0 +1,365 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.protocol;
+
+import org.apache.iotdb.collector.plugin.builtin.sink.client.IoTDBDataNodeSyncClientManager;
+import org.apache.iotdb.collector.plugin.builtin.sink.client.IoTDBSyncClient;
+import org.apache.iotdb.collector.plugin.builtin.sink.client.IoTDBSyncClientManager;
+import org.apache.iotdb.collector.plugin.builtin.sink.event.PipeRawTabletInsertionEvent;
+import org.apache.iotdb.collector.plugin.builtin.sink.event.PipeTsFileInsertionEvent;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.evolvable.batch.PipeTabletEventBatch;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.evolvable.batch.PipeTabletEventPlainBatch;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.evolvable.batch.PipeTabletEventTsFileBatch;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.evolvable.batch.PipeTransferBatchReqBuilder;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request.PipeTransferFilePieceReq;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request.PipeTransferTabletRawReqV2;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request.PipeTransferTsFilePieceReq;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request.PipeTransferTsFilePieceWithModReq;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request.PipeTransferTsFileSealWithModReq;
+import org.apache.iotdb.collector.utils.cacher.LeaderCacheUtils;
+import org.apache.iotdb.common.rpc.thrift.TEndPoint;
+import org.apache.iotdb.common.rpc.thrift.TSStatus;
+import org.apache.iotdb.pipe.api.customizer.configuration.PipeConnectorRuntimeConfiguration;
+import org.apache.iotdb.pipe.api.customizer.configuration.PipeSinkRuntimeConfiguration;
+import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
+import org.apache.iotdb.pipe.api.event.Event;
+import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
+import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent;
+import org.apache.iotdb.pipe.api.exception.PipeConnectionException;
+import org.apache.iotdb.pipe.api.exception.PipeException;
+import org.apache.iotdb.rpc.TSStatusCode;
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferResp;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.tsfile.exception.write.WriteProcessException;
+import org.apache.tsfile.utils.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.NoSuchFileException;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+public class IoTDBDataRegionSyncConnector extends IoTDBSslSyncConnector {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBDataRegionSyncConnector.class);
+
+ private PipeTransferBatchReqBuilder tabletBatchBuilder;
+
+ protected IoTDBDataNodeSyncClientManager clientManager;
+
+ @Override
+ protected IoTDBSyncClientManager constructClient(
+ final List nodeUrls,
+ final boolean useSSL,
+ final String trustStorePath,
+ final String trustStorePwd,
+ /* The following parameters are used locally. */
+ final boolean useLeaderCache,
+ final String loadBalanceStrategy,
+ /* The following parameters are used to handshake with the receiver. */
+ final String username,
+ final String password,
+ final boolean shouldReceiverConvertOnTypeMismatch,
+ final String loadTsFileStrategy,
+ final boolean validateTsFile,
+ final boolean shouldMarkAsPipeRequest) {
+ clientManager =
+ new IoTDBDataNodeSyncClientManager(
+ nodeUrls,
+ useSSL,
+ Objects.nonNull(trustStorePath)
+ ? /*IoTDBConfig.addDataHomeDir(trustStorePath)*/ ""
+ : null,
+ trustStorePwd,
+ useLeaderCache,
+ loadBalanceStrategy,
+ username,
+ password,
+ shouldReceiverConvertOnTypeMismatch,
+ loadTsFileStrategy,
+ validateTsFile,
+ shouldMarkAsPipeRequest);
+ return clientManager;
+ }
+
+ @Override
+ public void customize(
+ final PipeParameters parameters, final PipeSinkRuntimeConfiguration configuration)
+ throws Exception {
+ super.customize(parameters, configuration);
+
+ // tablet batch mode configuration
+ if (isTabletBatchModeEnabled) {
+ tabletBatchBuilder = new PipeTransferBatchReqBuilder(parameters);
+ }
+ }
+
+ @Override
+ protected PipeTransferFilePieceReq getTransferSingleFilePieceReq(
+ final String fileName, final long position, final byte[] payLoad) throws IOException {
+ return PipeTransferTsFilePieceReq.toTPipeTransferReq(fileName, position, payLoad);
+ }
+
+ @Override
+ protected PipeTransferFilePieceReq getTransferMultiFilePieceReq(
+ final String fileName, final long position, final byte[] payLoad) throws IOException {
+ return PipeTransferTsFilePieceWithModReq.toTPipeTransferReq(fileName, position, payLoad);
+ }
+
+ @Override
+ public void customize(
+ PipeParameters pipeParameters, PipeConnectorRuntimeConfiguration pipeSinkRuntimeConfiguration)
+ throws Exception {}
+
+ @Override
+ public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exception {
+ if (isTabletBatchModeEnabled) {
+ final Pair endPointAndBatch =
+ tabletBatchBuilder.onEvent(tabletInsertionEvent);
+ if (Objects.nonNull(endPointAndBatch)) {
+ doTransferWrapper(endPointAndBatch);
+ }
+ } else {
+ doTransferWrapper((PipeRawTabletInsertionEvent) tabletInsertionEvent);
+ }
+ }
+
+ @Override
+ public void transfer(final TsFileInsertionEvent tsFileInsertionEvent) throws Exception {
+ // In order to commit in order
+ if (isTabletBatchModeEnabled && !tabletBatchBuilder.isEmpty()) {
+ doTransferWrapper();
+ }
+
+ doTransferWrapper((PipeTsFileInsertionEvent) tsFileInsertionEvent);
+ }
+
+ @Override
+ public void transfer(final Event event) throws Exception {
+ if (isTabletBatchModeEnabled && !tabletBatchBuilder.isEmpty()) {
+ doTransferWrapper();
+ }
+ }
+
+ private void doTransferWrapper() throws IOException, WriteProcessException {
+ for (final Pair nonEmptyBatch :
+ tabletBatchBuilder.getAllNonEmptyBatches()) {
+ doTransferWrapper(nonEmptyBatch);
+ }
+ }
+
+ private void doTransferWrapper(final Pair endPointAndBatch)
+ throws IOException, WriteProcessException {
+ final PipeTabletEventBatch batch = endPointAndBatch.getRight();
+ if (batch instanceof PipeTabletEventPlainBatch) {
+ doTransfer(endPointAndBatch.getLeft(), (PipeTabletEventPlainBatch) batch);
+ } else if (batch instanceof PipeTabletEventTsFileBatch) {
+ doTransfer((PipeTabletEventTsFileBatch) batch);
+ } else {
+ LOGGER.warn("Unsupported batch type {}.", batch.getClass());
+ }
+ batch.onSuccess();
+ }
+
+ private void doTransfer(
+ final TEndPoint endPoint, final PipeTabletEventPlainBatch batchToTransfer) {
+ final Pair clientAndStatus = clientManager.getClient(endPoint);
+
+ final TPipeTransferResp resp;
+ try {
+ final TPipeTransferReq uncompressedReq = batchToTransfer.toTPipeTransferReq();
+ final TPipeTransferReq req = compressIfNeeded(uncompressedReq);
+
+ resp = clientAndStatus.getLeft().pipeTransfer(req);
+ } catch (final Exception e) {
+ clientAndStatus.setRight(false);
+ throw new PipeConnectionException(
+ String.format("Network error when transfer tablet batch, because %s.", e.getMessage()),
+ e);
+ }
+
+ final TSStatus status = resp.getStatus();
+ // Only handle the failed statuses to avoid string format performance overhead
+ if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()
+ && status.getCode() != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) {
+ receiverStatusHandler.handle(
+ resp.getStatus(),
+ String.format("Transfer PipeTransferTabletBatchReq error, result status %s", resp.status),
+ batchToTransfer.deepCopyEvents().toString());
+ }
+
+ for (final Pair redirectPair :
+ LeaderCacheUtils.parseRecommendedRedirections(status)) {
+ clientManager.updateLeaderCache(redirectPair.getLeft(), redirectPair.getRight());
+ }
+ }
+
+ private void doTransfer(final PipeTabletEventTsFileBatch batchToTransfer)
+ throws IOException, WriteProcessException {
+ final List> dbTsFilePairs = batchToTransfer.sealTsFiles();
+ final Map, Double> pipe2WeightMap = batchToTransfer.deepCopyPipe2WeightMap();
+
+ for (final Pair dbTsFile : dbTsFilePairs) {
+ doTransfer(dbTsFile.right, null, dbTsFile.left);
+ try {
+ FileUtils.delete(dbTsFile.right);
+ } catch (final NoSuchFileException e) {
+ LOGGER.info("The file {} is not found, may already be deleted.", dbTsFile);
+ } catch (final Exception e) {
+ LOGGER.warn(
+ "Failed to delete batch file {}, this file should be deleted manually later", dbTsFile);
+ }
+ }
+ }
+
+ private void doTransferWrapper(final PipeRawTabletInsertionEvent pipeRawTabletInsertionEvent)
+ throws PipeException {
+ doTransfer(pipeRawTabletInsertionEvent);
+ }
+
+ private void doTransfer(final PipeRawTabletInsertionEvent pipeRawTabletInsertionEvent)
+ throws PipeException {
+ final Pair clientAndStatus =
+ clientManager.getClient(pipeRawTabletInsertionEvent.getDeviceId());
+ final TPipeTransferResp resp;
+
+ try {
+ final TPipeTransferReq req =
+ compressIfNeeded(
+ PipeTransferTabletRawReqV2.toTPipeTransferReq(
+ pipeRawTabletInsertionEvent.getTablet(),
+ pipeRawTabletInsertionEvent.isAligned(),
+ pipeRawTabletInsertionEvent.isTableModelEvent()
+ ? pipeRawTabletInsertionEvent.getTableModelDatabaseName()
+ : null));
+ resp = clientAndStatus.getLeft().pipeTransfer(req);
+ } catch (final Exception e) {
+ clientAndStatus.setRight(false);
+ throw new PipeConnectionException(
+ String.format(
+ "Network error when transfer raw tablet insertion event, because %s.",
+ e.getMessage()),
+ e);
+ }
+
+ final TSStatus status = resp.getStatus();
+ // Only handle the failed statuses to avoid string format performance overhead
+ if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()
+ && status.getCode() != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) {
+ receiverStatusHandler.handle(
+ status,
+ String.format(
+ "Transfer PipeRawTabletInsertionEvent %s error, result status %s",
+ pipeRawTabletInsertionEvent, status),
+ pipeRawTabletInsertionEvent.toString());
+ }
+ if (status.isSetRedirectNode()) {
+ clientManager.updateLeaderCache(
+ pipeRawTabletInsertionEvent.getDeviceId(), status.getRedirectNode());
+ }
+ }
+
+ private void doTransferWrapper(final PipeTsFileInsertionEvent pipeTsFileInsertionEvent)
+ throws PipeException, IOException {
+ doTransfer(
+ pipeTsFileInsertionEvent.getTsFile(),
+ null,
+ pipeTsFileInsertionEvent.isTableModelEvent()
+ ? pipeTsFileInsertionEvent.getTableModelDatabaseName()
+ : null);
+ }
+
+ private void doTransfer(final File tsFile, final File modFile, final String dataBaseName)
+ throws PipeException, IOException {
+
+ final Pair clientAndStatus = clientManager.getClient();
+ final TPipeTransferResp resp;
+
+ // 1. Transfer tsFile, and mod file if exists and receiver's version >= 2
+ if (Objects.nonNull(modFile) && clientManager.supportModsIfIsDataNodeReceiver()) {
+ transferFilePieces(modFile, clientAndStatus, true);
+ transferFilePieces(tsFile, clientAndStatus, true);
+
+ // 2. Transfer file seal signal with mod, which means the file is transferred completely
+ try {
+ final TPipeTransferReq req =
+ compressIfNeeded(
+ PipeTransferTsFileSealWithModReq.toTPipeTransferReq(
+ modFile.getName(),
+ modFile.length(),
+ tsFile.getName(),
+ tsFile.length(),
+ dataBaseName));
+
+ resp = clientAndStatus.getLeft().pipeTransfer(req);
+ } catch (final Exception e) {
+ clientAndStatus.setRight(false);
+ clientManager.adjustTimeoutIfNecessary(e);
+ throw new PipeConnectionException(
+ String.format("Network error when seal file %s, because %s.", tsFile, e.getMessage()),
+ e);
+ }
+ } else {
+ transferFilePieces(tsFile, clientAndStatus, false);
+
+ // 2. Transfer file seal signal without mod, which means the file is transferred completely
+ try {
+ final TPipeTransferReq req =
+ compressIfNeeded(
+ PipeTransferTsFileSealWithModReq.toTPipeTransferReq(
+ tsFile.getName(), tsFile.length(), dataBaseName));
+
+ resp = clientAndStatus.getLeft().pipeTransfer(req);
+ } catch (final Exception e) {
+ clientAndStatus.setRight(false);
+ clientManager.adjustTimeoutIfNecessary(e);
+ throw new PipeConnectionException(
+ String.format("Network error when seal file %s, because %s.", tsFile, e.getMessage()),
+ e);
+ }
+ }
+
+ final TSStatus status = resp.getStatus();
+ // Only handle the failed statuses to avoid string format performance overhead
+ if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()
+ && status.getCode() != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) {
+ receiverStatusHandler.handle(
+ resp.getStatus(),
+ String.format("Seal file %s error, result status %s.", tsFile, resp.getStatus()),
+ tsFile.getName());
+ }
+
+ LOGGER.info("Successfully transferred file {}.", tsFile);
+ }
+
+ @Override
+ public void close() throws Exception {
+ if (tabletBatchBuilder != null) {
+ tabletBatchBuilder.close();
+ }
+
+ super.close();
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/IoTDBSslSyncConnector.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/IoTDBSslSyncConnector.java
new file mode 100644
index 0000000..6c616e2
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/IoTDBSslSyncConnector.java
@@ -0,0 +1,229 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.protocol;
+
+import org.apache.iotdb.collector.config.PipeRuntimeOptions;
+import org.apache.iotdb.collector.plugin.builtin.sink.client.IoTDBSyncClient;
+import org.apache.iotdb.collector.plugin.builtin.sink.client.IoTDBSyncClientManager;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.request.PipeTransferFilePieceReq;
+import org.apache.iotdb.collector.plugin.builtin.sink.payload.thrift.response.PipeTransferFilePieceResp;
+import org.apache.iotdb.common.rpc.thrift.TEndPoint;
+import org.apache.iotdb.common.rpc.thrift.TSStatus;
+import org.apache.iotdb.pipe.api.customizer.configuration.PipeSinkRuntimeConfiguration;
+import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator;
+import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
+import org.apache.iotdb.pipe.api.exception.PipeConnectionException;
+import org.apache.iotdb.pipe.api.exception.PipeException;
+import org.apache.iotdb.rpc.TSStatusCode;
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.tsfile.utils.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_LEADER_CACHE_ENABLE_DEFAULT_VALUE;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.CONNECTOR_LEADER_CACHE_ENABLE_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_IOTDB_SSL_ENABLE_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_IOTDB_SSL_TRUST_STORE_PATH_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_IOTDB_SSL_TRUST_STORE_PWD_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_KEY;
+import static org.apache.iotdb.collector.plugin.builtin.sink.constant.PipeConnectorConstant.SINK_LEADER_CACHE_ENABLE_KEY;
+
+public abstract class IoTDBSslSyncConnector extends IoTDBConnector {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBSslSyncConnector.class);
+
+ protected IoTDBSyncClientManager clientManager;
+
+ @Override
+ public void validate(final PipeParameterValidator validator) throws Exception {
+ super.validate(validator);
+
+ final PipeParameters parameters = validator.getParameters();
+
+ final String userSpecifiedConnectorName =
+ parameters
+ .getStringOrDefault(
+ ImmutableList.of(CONNECTOR_KEY, SINK_KEY), ""
+ /*IOTDB_THRIFT_CONNECTOR.getPipePluginName()*/ )
+ .toLowerCase();
+
+ validator.validate(
+ args -> !((boolean) args[0]) || ((boolean) args[1] && (boolean) args[2]),
+ String.format(
+ "When ssl transport is enabled, %s and %s must be specified",
+ SINK_IOTDB_SSL_TRUST_STORE_PATH_KEY, SINK_IOTDB_SSL_TRUST_STORE_PWD_KEY),
+ parameters.getBooleanOrDefault(SINK_IOTDB_SSL_ENABLE_KEY, false),
+ parameters.hasAttribute(SINK_IOTDB_SSL_TRUST_STORE_PATH_KEY),
+ parameters.hasAttribute(SINK_IOTDB_SSL_TRUST_STORE_PWD_KEY));
+ }
+
+ @Override
+ public void customize(
+ final PipeParameters parameters, final PipeSinkRuntimeConfiguration configuration)
+ throws Exception {
+ super.customize(parameters, configuration);
+
+ final String trustStorePath = parameters.getString(SINK_IOTDB_SSL_TRUST_STORE_PATH_KEY);
+ final String trustStorePwd = parameters.getString(SINK_IOTDB_SSL_TRUST_STORE_PWD_KEY);
+
+ // leader cache configuration
+ final boolean useLeaderCache =
+ parameters.getBooleanOrDefault(
+ Arrays.asList(SINK_LEADER_CACHE_ENABLE_KEY, CONNECTOR_LEADER_CACHE_ENABLE_KEY),
+ CONNECTOR_LEADER_CACHE_ENABLE_DEFAULT_VALUE);
+
+ clientManager =
+ constructClient(
+ nodeUrls,
+ false,
+ trustStorePath,
+ trustStorePwd,
+ useLeaderCache,
+ loadBalanceStrategy,
+ username,
+ password,
+ shouldReceiverConvertOnTypeMismatch,
+ loadTsFileStrategy,
+ loadTsFileValidation,
+ shouldMarkAsPipeRequest);
+ }
+
+ protected abstract IoTDBSyncClientManager constructClient(
+ final List nodeUrls,
+ final boolean useSSL,
+ final String trustStorePath,
+ final String trustStorePwd,
+ /* The following parameters are used locally. */
+ final boolean useLeaderCache,
+ final String loadBalanceStrategy,
+ /* The following parameters are used to handshake with the receiver. */
+ final String username,
+ final String password,
+ final boolean shouldReceiverConvertOnTypeMismatch,
+ final String loadTsFileStrategy,
+ final boolean validateTsFile,
+ final boolean shouldMarkAsPipeRequest);
+
+ @Override
+ public void handshake() throws Exception {
+ clientManager.checkClientStatusAndTryReconstructIfNecessary();
+ }
+
+ @Override
+ public void heartbeat() {
+ try {
+ handshake();
+ } catch (final Exception e) {
+ LOGGER.warn(
+ "Failed to reconnect to target server, because: {}. Try to reconnect later.",
+ e.getMessage(),
+ e);
+ }
+ }
+
+ protected void transferFilePieces(
+ final File file,
+ final Pair clientAndStatus,
+ final boolean isMultiFile)
+ throws PipeException, IOException {
+ final int readFileBufferSize = PipeRuntimeOptions.PIPE_CONNECTOR_READ_FILE_BUFFER_SIZE.value();
+ final byte[] readBuffer = new byte[readFileBufferSize];
+ long position = 0;
+ try (final RandomAccessFile reader = new RandomAccessFile(file, "r")) {
+ while (true) {
+ final int readLength = reader.read(readBuffer);
+ if (readLength == -1) {
+ break;
+ }
+
+ final byte[] payLoad =
+ readLength == readFileBufferSize
+ ? readBuffer
+ : Arrays.copyOfRange(readBuffer, 0, readLength);
+ final PipeTransferFilePieceResp resp;
+ try {
+ final TPipeTransferReq req =
+ compressIfNeeded(
+ isMultiFile
+ ? getTransferMultiFilePieceReq(file.getName(), position, payLoad)
+ : getTransferSingleFilePieceReq(file.getName(), position, payLoad));
+
+ resp =
+ PipeTransferFilePieceResp.fromTPipeTransferResp(
+ clientAndStatus.getLeft().pipeTransfer(req));
+ } catch (final Exception e) {
+ clientAndStatus.setRight(false);
+ throw new PipeConnectionException(
+ String.format(
+ "Network error when transfer file %s, because %s.", file, e.getMessage()),
+ e);
+ }
+
+ position += readLength;
+
+ final TSStatus status = resp.getStatus();
+ // This case only happens when the connection is broken, and the connector is reconnected
+ // to the receiver, then the receiver will redirect the file position to the last position
+ if (status.getCode() == TSStatusCode.PIPE_TRANSFER_FILE_OFFSET_RESET.getStatusCode()) {
+ position = resp.getEndWritingOffset();
+ reader.seek(position);
+ LOGGER.info("Redirect file position to {}.", position);
+ continue;
+ }
+
+ // Send handshake req and then re-transfer the event
+ if (status.getCode()
+ == TSStatusCode.PIPE_CONFIG_RECEIVER_HANDSHAKE_NEEDED.getStatusCode()) {
+ clientManager.sendHandshakeReq(clientAndStatus);
+ }
+ // Only handle the failed statuses to avoid string format performance overhead
+ if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()
+ && status.getCode() != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) {
+ receiverStatusHandler.handle(
+ resp.getStatus(),
+ String.format("Transfer file %s error, result status %s.", file, resp.getStatus()),
+ file.getName());
+ }
+ }
+ }
+ }
+
+ protected abstract PipeTransferFilePieceReq getTransferSingleFilePieceReq(
+ final String fileName, final long position, final byte[] payLoad) throws IOException;
+
+ protected abstract PipeTransferFilePieceReq getTransferMultiFilePieceReq(
+ final String fileName, final long position, final byte[] payLoad) throws IOException;
+
+ @Override
+ public void close() throws Exception {
+ if (clientManager != null) {
+ clientManager.close();
+ }
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/PipeReceiverStatusHandler.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/PipeReceiverStatusHandler.java
new file mode 100644
index 0000000..013d864
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/PipeReceiverStatusHandler.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.protocol;
+
+import org.apache.iotdb.common.rpc.thrift.TSStatus;
+import org.apache.iotdb.pipe.api.event.Event;
+import org.apache.iotdb.pipe.api.exception.PipeException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
+public class PipeReceiverStatusHandler {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(PipeReceiverStatusHandler.class);
+
+ private static final int CONFLICT_RETRY_MAX_TIMES = 100;
+
+ private final boolean isRetryAllowedWhenConflictOccurs;
+ private final long retryMaxMillisWhenConflictOccurs;
+ private final boolean shouldRecordIgnoredDataWhenConflictOccurs;
+
+ private final long retryMaxMillisWhenOtherExceptionsOccur;
+ private final boolean shouldRecordIgnoredDataWhenOtherExceptionsOccur;
+
+ private final AtomicLong exceptionFirstEncounteredTime = new AtomicLong(0);
+ private final AtomicBoolean exceptionEventHasBeenRetried = new AtomicBoolean(false);
+ private final AtomicReference exceptionRecordedMessage = new AtomicReference<>("");
+
+ public PipeReceiverStatusHandler(
+ final boolean isRetryAllowedWhenConflictOccurs,
+ final long retryMaxSecondsWhenConflictOccurs,
+ final boolean shouldRecordIgnoredDataWhenConflictOccurs,
+ final long retryMaxSecondsWhenOtherExceptionsOccur,
+ final boolean shouldRecordIgnoredDataWhenOtherExceptionsOccur) {
+ this.isRetryAllowedWhenConflictOccurs = isRetryAllowedWhenConflictOccurs;
+ this.retryMaxMillisWhenConflictOccurs =
+ retryMaxSecondsWhenConflictOccurs < 0
+ ? Long.MAX_VALUE
+ : retryMaxSecondsWhenConflictOccurs * 1000;
+ this.shouldRecordIgnoredDataWhenConflictOccurs = shouldRecordIgnoredDataWhenConflictOccurs;
+
+ this.retryMaxMillisWhenOtherExceptionsOccur =
+ retryMaxSecondsWhenOtherExceptionsOccur < 0
+ ? Long.MAX_VALUE
+ : retryMaxSecondsWhenOtherExceptionsOccur * 1000;
+ this.shouldRecordIgnoredDataWhenOtherExceptionsOccur =
+ shouldRecordIgnoredDataWhenOtherExceptionsOccur;
+ }
+
+ /**
+ * Handle {@link TSStatus} returned by receiver. Do nothing if ignore the {@link Event}, and throw
+ * exception if retry the {@link Event}. Upper class must ensure that the method is invoked only
+ * by a single thread.
+ *
+ * @throws PipeException to retry the current {@link Event}
+ * @param status the {@link TSStatus} to judge
+ * @param exceptionMessage The exception message to throw
+ * @param recordMessage The message to record an ignored {@link Event}, the caller should assure
+ * that the same {@link Event} generates always the same record message, for instance, do not
+ * put any time-related info here
+ */
+ public void handle(
+ final TSStatus status, final String exceptionMessage, final String recordMessage) {
+ switch (status.getCode()) {
+ case 200: // SUCCESS_STATUS
+ case 400: // REDIRECTION_RECOMMEND
+ {
+ return;
+ }
+
+ case 1809: // PIPE_RECEIVER_IDEMPOTENT_CONFLICT_EXCEPTION
+ {
+ LOGGER.info("Idempotent conflict exception: will be ignored. status: {}", status);
+ return;
+ }
+
+ case 1808: // PIPE_RECEIVER_TEMPORARY_UNAVAILABLE_EXCEPTION
+ {
+ LOGGER.info("Temporary unavailable exception: will retry forever. status: {}", status);
+ throw new PipeException(exceptionMessage);
+ }
+
+ case 1810: // PIPE_RECEIVER_USER_CONFLICT_EXCEPTION
+ if (!isRetryAllowedWhenConflictOccurs) {
+ LOGGER.warn(
+ "User conflict exception: will be ignored because retry is not allowed. event: {}. status: {}",
+ shouldRecordIgnoredDataWhenConflictOccurs ? recordMessage : "not recorded",
+ status);
+ return;
+ }
+
+ synchronized (this) {
+ recordExceptionStatusIfNecessary(recordMessage);
+
+ if (exceptionEventHasBeenRetried.get()
+ && System.currentTimeMillis() - exceptionFirstEncounteredTime.get()
+ > retryMaxMillisWhenConflictOccurs) {
+ LOGGER.warn(
+ "User conflict exception: retry timeout. will be ignored. event: {}. status: {}",
+ shouldRecordIgnoredDataWhenConflictOccurs ? recordMessage : "not recorded",
+ status);
+ resetExceptionStatus();
+ return;
+ }
+
+ LOGGER.warn(
+ "User conflict exception: will retry {}. status: {}",
+ retryMaxMillisWhenConflictOccurs == Long.MAX_VALUE
+ ? "forever"
+ : "for at least "
+ + (retryMaxMillisWhenConflictOccurs
+ + exceptionFirstEncounteredTime.get()
+ - System.currentTimeMillis())
+ / 1000.0
+ + " seconds",
+ status);
+ exceptionEventHasBeenRetried.set(true);
+ throw new PipeException(
+ exceptionMessage,
+ (int)
+ Math.max(
+ 5,
+ Math.min(CONFLICT_RETRY_MAX_TIMES, retryMaxMillisWhenConflictOccurs * 1.1)));
+ }
+
+ default: // Other exceptions
+ synchronized (this) {
+ recordExceptionStatusIfNecessary(recordMessage);
+
+ if (exceptionEventHasBeenRetried.get()
+ && System.currentTimeMillis() - exceptionFirstEncounteredTime.get()
+ > retryMaxMillisWhenOtherExceptionsOccur) {
+ LOGGER.warn(
+ "Unclassified exception: retry timeout. will be ignored. event: {}. status: {}",
+ shouldRecordIgnoredDataWhenOtherExceptionsOccur ? recordMessage : "not recorded",
+ status);
+ resetExceptionStatus();
+ return;
+ }
+
+ LOGGER.warn(
+ "Unclassified exception: will retry {}. status: {}",
+ retryMaxMillisWhenOtherExceptionsOccur == Long.MAX_VALUE
+ ? "forever"
+ : "for at least "
+ + (retryMaxMillisWhenOtherExceptionsOccur
+ + exceptionFirstEncounteredTime.get()
+ - System.currentTimeMillis())
+ / 1000.0
+ + " seconds",
+ status);
+ exceptionEventHasBeenRetried.set(true);
+ throw new PipeException(
+ exceptionMessage,
+ (int)
+ Math.max(
+ 5,
+ Math.min(
+ CONFLICT_RETRY_MAX_TIMES, retryMaxMillisWhenOtherExceptionsOccur * 1.1)));
+ }
+ }
+ }
+
+ private void recordExceptionStatusIfNecessary(final String message) {
+ if (!Objects.equals(exceptionRecordedMessage.get(), message)) {
+ exceptionFirstEncounteredTime.set(System.currentTimeMillis());
+ exceptionEventHasBeenRetried.set(false);
+ exceptionRecordedMessage.set(message);
+ }
+ }
+
+ private void resetExceptionStatus() {
+ exceptionFirstEncounteredTime.set(0);
+ exceptionEventHasBeenRetried.set(false);
+ exceptionRecordedMessage.set("");
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/session/IClientSession.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/session/IClientSession.java
new file mode 100644
index 0000000..0a1b414
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/sink/protocol/session/IClientSession.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.sink.protocol.session;
+
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+public abstract class IClientSession {
+
+ private long id;
+
+ private String username;
+
+ /** ip:port for thrift-based service and client id for mqtt-based service. */
+ abstract String getConnectionId();
+
+ public String getUsername() {
+ return this.username;
+ }
+
+ public long getId() {
+ return id;
+ }
+
+ public void setId(long id) {
+ this.id = id;
+ }
+
+ public String toString() {
+ return String.format("%d-%s:%s", getId(), getUsername(), getConnectionId());
+ }
+
+ public void setUsername(String username) {
+ this.username = username;
+ }
+
+ public enum SqlDialect {
+ TREE((byte) 0),
+ TABLE((byte) 1);
+
+ private final byte dialect;
+
+ SqlDialect(byte dialect) {
+ this.dialect = dialect;
+ }
+
+ public void serialize(final DataOutputStream stream) throws IOException {
+ ReadWriteIOUtils.write(dialect, stream);
+ }
+
+ public void serialize(final ByteBuffer buffer) {
+ ReadWriteIOUtils.write(dialect, buffer);
+ }
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/HttpPullSource.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/HttpPullSource.java
index afb8d15..ff4cffe 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/HttpPullSource.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/HttpPullSource.java
@@ -21,7 +21,6 @@
import org.apache.iotdb.collector.plugin.api.PullSource;
import org.apache.iotdb.collector.plugin.api.event.DemoEvent;
-import org.apache.iotdb.pipe.api.customizer.configuration.PipeExtractorRuntimeConfiguration;
import org.apache.iotdb.pipe.api.customizer.configuration.PipeSourceRuntimeConfiguration;
import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator;
import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
@@ -41,11 +40,6 @@ public class HttpPullSource extends PullSource {
@Override
public void validate(PipeParameterValidator pipeParameterValidator) {}
- @Override
- public void customize(
- PipeParameters pipeParameters,
- PipeExtractorRuntimeConfiguration pipeExtractorRuntimeConfiguration) {}
-
@Override
public void customize(
PipeParameters pipeParameters,
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/HttpPushSource.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/HttpPushSource.java
index fa81fc2..2225829 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/HttpPushSource.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/HttpPushSource.java
@@ -21,7 +21,6 @@
import org.apache.iotdb.collector.plugin.api.PushSource;
import org.apache.iotdb.collector.plugin.api.event.DemoEvent;
-import org.apache.iotdb.pipe.api.customizer.configuration.PipeExtractorRuntimeConfiguration;
import org.apache.iotdb.pipe.api.customizer.configuration.PipeSourceRuntimeConfiguration;
import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator;
import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
@@ -43,10 +42,6 @@ public class HttpPushSource extends PushSource {
@Override
public void validate(final PipeParameterValidator validator) {}
- @Override
- public void customize(
- final PipeParameters parameters, final PipeExtractorRuntimeConfiguration configuration) {}
-
@Override
public void customize(
final PipeParameters parameters, final PipeSourceRuntimeConfiguration configuration) {}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/IoTDBPushSource.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/IoTDBPushSource.java
new file mode 100644
index 0000000..6631569
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/IoTDBPushSource.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.source;
+
+import org.apache.iotdb.collector.plugin.api.PushSource;
+import org.apache.iotdb.collector.plugin.builtin.source.constant.IoTDBPushSourceConstant;
+import org.apache.iotdb.collector.plugin.builtin.source.event.SubDemoEvent;
+import org.apache.iotdb.pipe.api.customizer.configuration.PipeSourceRuntimeConfiguration;
+import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator;
+import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
+import org.apache.iotdb.rpc.subscription.config.ConsumerConstant;
+import org.apache.iotdb.session.subscription.consumer.tree.SubscriptionTreePullConsumer;
+import org.apache.iotdb.session.subscription.payload.SubscriptionMessage;
+import org.apache.iotdb.session.subscription.payload.SubscriptionMessageType;
+import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Properties;
+
+public class IoTDBPushSource extends PushSource {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBPushSource.class);
+
+ private String host;
+ private Integer port;
+ private String topic;
+ private Long timeout;
+ private String deviceId;
+
+ private volatile boolean isStarted = true;
+ private Thread workerThread;
+
+ @Override
+ public void validate(PipeParameterValidator validator) throws Exception {}
+
+ @Override
+ public void customize(
+ PipeParameters pipeParameters, PipeSourceRuntimeConfiguration pipeSourceRuntimeConfiguration)
+ throws Exception {
+ host =
+ pipeParameters.getStringOrDefault(
+ IoTDBPushSourceConstant.HOST_KEY, IoTDBPushSourceConstant.HOST_VALUE);
+ port =
+ pipeParameters.getIntOrDefault(
+ IoTDBPushSourceConstant.PORT_KEY, IoTDBPushSourceConstant.PORT_VALUE);
+ topic =
+ pipeParameters.getStringOrDefault(
+ IoTDBPushSourceConstant.TOPIC_KEY, IoTDBPushSourceConstant.TOPIC_VALUE);
+ timeout =
+ pipeParameters.getLongOrDefault(
+ IoTDBPushSourceConstant.TIMEOUT_KEY, IoTDBPushSourceConstant.TIMEOUT_VALUE);
+ deviceId =
+ pipeParameters.getStringOrDefault(
+ IoTDBPushSourceConstant.DEVICE_ID_KEY, IoTDBPushSourceConstant.DEVICE_ID_VALUE);
+ }
+
+ @Override
+ public void start() throws Exception {
+ if (workerThread == null || !workerThread.isAlive()) {
+ isStarted = true;
+ workerThread = new Thread(this::doWork);
+ workerThread.start();
+ }
+ }
+
+ private void doWork() {
+ final Properties pullProperties = new Properties();
+ pullProperties.put(IoTDBPushSourceConstant.HOST_KEY, host);
+ pullProperties.put(IoTDBPushSourceConstant.PORT_KEY, port);
+ pullProperties.put(ConsumerConstant.CONSUMER_ID_KEY, "r1");
+ pullProperties.put(ConsumerConstant.CONSUMER_GROUP_ID_KEY, "rg1");
+
+ try (final SubscriptionTreePullConsumer consumer =
+ new SubscriptionTreePullConsumer(pullProperties)) {
+ consumer.open();
+ consumer.subscribe(topic);
+
+ while (isStarted && !Thread.currentThread().isInterrupted()) {
+ final List messages = consumer.poll(timeout);
+ for (final SubscriptionMessage message : messages) {
+ final short messageType = message.getMessageType();
+ if (SubscriptionMessageType.isValidatedMessageType(messageType)) {
+ for (final SubscriptionSessionDataSet dataSet : message.getSessionDataSetsHandler()) {
+ final SubDemoEvent event = new SubDemoEvent(dataSet.getTablet(), deviceId);
+ supply(event);
+ }
+ }
+ }
+ }
+ } catch (final Exception e) {
+ Thread.currentThread().interrupt();
+ LOGGER.error("Error in push source", e);
+ }
+ }
+
+ @Override
+ public void close() throws Exception {
+ isStarted = false;
+ if (workerThread != null) {
+ workerThread.interrupt();
+ try {
+ workerThread.join(1000);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ workerThread = null;
+ }
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/constant/IoTDBPushSourceConstant.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/constant/IoTDBPushSourceConstant.java
new file mode 100644
index 0000000..9bc516e
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/constant/IoTDBPushSourceConstant.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.source.constant;
+
/**
 * Parameter keys and default values for the IoTDB push source demo.
 *
 * <p>Pure constants holder; not instantiable.
 */
public final class IoTDBPushSourceConstant {

  public static final String HOST_KEY = "host";
  public static final String PORT_KEY = "port";
  public static final String TOPIC_KEY = "topic";
  public static final String TIMEOUT_KEY = "timeout";
  public static final String DEVICE_ID_KEY = "deviceId";

  public static final String HOST_VALUE = "127.0.0.1";
  public static final Integer PORT_VALUE = 6668;
  public static final String TOPIC_VALUE = "root_all";
  public static final Long TIMEOUT_VALUE = 10000L;
  public static final String DEVICE_ID_VALUE = "root.test.demo";

  private IoTDBPushSourceConstant() {
    // utility class, no instances
  }
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/SubDemoEvent.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/SubDemoEvent.java
new file mode 100644
index 0000000..8eb95a5
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/SubDemoEvent.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.source.event;
+
+import org.apache.iotdb.collector.plugin.builtin.sink.event.PipeRawTabletInsertionEvent;
+import org.apache.iotdb.collector.plugin.builtin.source.event.common.PipeRowCollector;
+import org.apache.iotdb.pipe.api.collector.RowCollector;
+import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
+
+import org.apache.tsfile.write.record.Tablet;
+
+import java.util.function.BiConsumer;
+
/**
 * Demo tablet-insertion event produced by the IoTDB push source: wraps a tablet received from a
 * subscription and lets processors rewrite it row by row.
 */
public class SubDemoEvent extends PipeRawTabletInsertionEvent {

  public SubDemoEvent(Tablet tablet, String deviceId) {
    super(tablet, deviceId);
  }

  // NOTE(review): raw Iterable/BiConsumer — presumably mirrors the raw signature declared by
  // PipeRawTabletInsertionEvent#processTablet; confirm generics against the superclass.
  @Override
  public Iterable processTablet(
      final BiConsumer consumer) {
    // Feed the wrapped tablet to the caller's consumer; the collector gathers the rows it emits
    // and converts them back into tablet-insertion events.
    final PipeRowCollector collector = new PipeRowCollector();
    consumer.accept(tablet, collector);
    // false: presumably means the resulting events are not progress-reported — TODO confirm
    return collector.convertToTabletInsertionEvents(false);
  }
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeBinaryTransformer.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeBinaryTransformer.java
new file mode 100644
index 0000000..11c2dd7
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeBinaryTransformer.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.source.event.common;
+
+public class PipeBinaryTransformer {
+
+ public static org.apache.tsfile.utils.Binary transformToBinary(
+ org.apache.iotdb.pipe.api.type.Binary binary) {
+ return binary == null ? null : new org.apache.tsfile.utils.Binary(binary.getValues());
+ }
+
+ public static org.apache.iotdb.pipe.api.type.Binary transformToPipeBinary(
+ org.apache.tsfile.utils.Binary binary) {
+ return binary == null ? null : new org.apache.iotdb.pipe.api.type.Binary(binary.getValues());
+ }
+
+ private PipeBinaryTransformer() {
+ // util class
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeDataTypeTransformer.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeDataTypeTransformer.java
new file mode 100644
index 0000000..90c3aa3
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeDataTypeTransformer.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.source.event.common;
+
+import org.apache.iotdb.pipe.api.type.Type;
+
+import org.apache.tsfile.enums.TSDataType;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+/** Transform between {@link TSDataType} and {@link Type}. */
+public class PipeDataTypeTransformer {
+
+ public static List transformToPipeDataTypeList(final List tsDataTypeList) {
+ return tsDataTypeList == null
+ ? null
+ : tsDataTypeList.stream()
+ .map(PipeDataTypeTransformer::transformToPipeDataType)
+ .collect(Collectors.toList());
+ }
+
+ public static Type transformToPipeDataType(final TSDataType tsDataType) {
+ return tsDataType == null ? null : getPipeDataType(tsDataType.getType());
+ }
+
+ private static Type getPipeDataType(final byte type) {
+ switch (type) {
+ case 0:
+ return Type.BOOLEAN;
+ case 1:
+ return Type.INT32;
+ case 2:
+ return Type.INT64;
+ case 3:
+ return Type.FLOAT;
+ case 4:
+ return Type.DOUBLE;
+ case 5:
+ return Type.TEXT;
+ case 8:
+ return Type.TIMESTAMP;
+ case 9:
+ return Type.DATE;
+ case 10:
+ return Type.BLOB;
+ case 11:
+ return Type.STRING;
+ default:
+ throw new IllegalArgumentException("Invalid input: " + type);
+ }
+ }
+
+ private PipeDataTypeTransformer() {
+ // util class
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeResetTabletRow.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeResetTabletRow.java
new file mode 100644
index 0000000..6c81681
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeResetTabletRow.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.source.event.common;
+
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.utils.BitMap;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+
/**
 * Marker subclass of {@link PipeRow}: when a {@link PipeRowCollector} encounters a row of this
 * type, it flushes the tablet it is currently building and starts a new {@link Tablet}.
 *
 * <p>Adds no state or behavior of its own — the constructor simply forwards every columnar
 * argument to {@link PipeRow}.
 */
public class PipeResetTabletRow extends PipeRow {

  public PipeResetTabletRow(
      int rowIndex,
      String deviceId,
      boolean isAligned,
      MeasurementSchema[] measurementSchemaList,
      long[] timestampColumn,
      TSDataType[] valueColumnTypes,
      Object[] valueColumns,
      BitMap[] bitMaps,
      String[] columnNameStringList) {
    super(
        rowIndex,
        deviceId,
        isAligned,
        measurementSchemaList,
        timestampColumn,
        valueColumnTypes,
        valueColumns,
        bitMaps,
        columnNameStringList);
  }
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeRow.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeRow.java
new file mode 100644
index 0000000..8725c96
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeRow.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.source.event.common;
+
+import org.apache.iotdb.pipe.api.access.Row;
+import org.apache.iotdb.pipe.api.exception.PipeParameterNotValidException;
+import org.apache.iotdb.pipe.api.type.Type;
+
+import org.apache.tsfile.common.conf.TSFileConfig;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.read.common.Path;
+import org.apache.tsfile.utils.BitMap;
+import org.apache.tsfile.write.schema.IMeasurementSchema;
+
+import java.time.LocalDate;
+import java.util.Arrays;
+import java.util.List;
+
/**
 * Read-only {@link Row} view over a single row of columnar tablet data.
 *
 * <p>All accessors index the shared column arrays at {@link #rowIndex}; the instance itself
 * holds no per-cell data.
 */
public class PipeRow implements Row {

  // Index of this row within the backing columnar arrays.
  protected final int rowIndex;

  protected final String deviceId;
  protected final boolean isAligned;
  protected final IMeasurementSchema[] measurementSchemaList;

  // Columnar storage: one timestamp per row, one value array per column.
  protected final long[] timestampColumn;
  protected final TSDataType[] valueColumnTypes;
  // Each element is a primitive or object array holding the whole column's values.
  protected final Object[] valueColumns;
  // bitMaps[col].isMarked(row) marks a null cell.
  protected final BitMap[] bitMaps;

  protected final String[] columnNameStringList;

  public PipeRow(
      final int rowIndex,
      final String deviceId,
      final boolean isAligned,
      final IMeasurementSchema[] measurementSchemaList,
      final long[] timestampColumn,
      final TSDataType[] valueColumnTypes,
      final Object[] valueColumns,
      final BitMap[] bitMaps,
      final String[] columnNameStringList) {
    this.rowIndex = rowIndex;
    this.deviceId = deviceId;
    this.isAligned = isAligned;
    this.measurementSchemaList = measurementSchemaList;
    this.timestampColumn = timestampColumn;
    this.valueColumnTypes = valueColumnTypes;
    this.valueColumns = valueColumns;
    this.bitMaps = bitMaps;
    this.columnNameStringList = columnNameStringList;
  }

  @Override
  public long getTime() {
    return timestampColumn[rowIndex];
  }

  @Override
  public int getInt(final int columnIndex) {
    return ((int[]) valueColumns[columnIndex])[rowIndex];
  }

  @Override
  public LocalDate getDate(final int columnIndex) {
    return ((LocalDate[]) valueColumns[columnIndex])[rowIndex];
  }

  @Override
  public long getLong(final int columnIndex) {
    return ((long[]) valueColumns[columnIndex])[rowIndex];
  }

  @Override
  public float getFloat(final int columnIndex) {
    return ((float[]) valueColumns[columnIndex])[rowIndex];
  }

  @Override
  public double getDouble(final int columnIndex) {
    return ((double[]) valueColumns[columnIndex])[rowIndex];
  }

  @Override
  public boolean getBoolean(final int columnIndex) {
    return ((boolean[]) valueColumns[columnIndex])[rowIndex];
  }

  // Converts the stored tsfile Binary into the pipe-api Binary exposed to plugins.
  @Override
  public org.apache.iotdb.pipe.api.type.Binary getBinary(final int columnIndex) {
    return PipeBinaryTransformer.transformToPipeBinary(
        ((org.apache.tsfile.utils.Binary[]) valueColumns[columnIndex])[rowIndex]);
  }

  @Override
  public String getString(final int columnIndex) {
    final org.apache.tsfile.utils.Binary binary =
        ((org.apache.tsfile.utils.Binary[]) valueColumns[columnIndex])[rowIndex];
    return binary == null ? null : binary.getStringValue(TSFileConfig.STRING_CHARSET);
  }

  /** Dispatches to the typed getter matching the column's data type. */
  @Override
  public Object getObject(final int columnIndex) {
    switch (getDataType(columnIndex)) {
      case INT32:
        return getInt(columnIndex);
      case DATE:
        return getDate(columnIndex);
      case INT64:
      case TIMESTAMP:
        return getLong(columnIndex);
      case FLOAT:
        return getFloat(columnIndex);
      case DOUBLE:
        return getDouble(columnIndex);
      case BOOLEAN:
        return getBoolean(columnIndex);
      case TEXT:
      case BLOB:
      case STRING:
        // Returned as pipe-api Binary, not String (see getBinary).
        return getBinary(columnIndex);
      default:
        throw new UnsupportedOperationException(
            String.format(
                "unsupported data type %s for column %s",
                getDataType(columnIndex), columnNameStringList[columnIndex]));
    }
  }

  @Override
  public Type getDataType(final int columnIndex) {
    return PipeDataTypeTransformer.transformToPipeDataType(valueColumnTypes[columnIndex]);
  }

  @Override
  public boolean isNull(final int columnIndex) {
    return bitMaps[columnIndex].isMarked(rowIndex);
  }

  /** Number of value columns (the timestamp is not counted). */
  @Override
  public int size() {
    return valueColumns.length;
  }

  /**
   * Linear search for the column whose name equals the path's full path.
   *
   * @throws PipeParameterNotValidException when no column matches
   */
  @Override
  public int getColumnIndex(final Path columnName) throws PipeParameterNotValidException {
    for (int i = 0; i < columnNameStringList.length; i++) {
      if (columnNameStringList[i].equals(columnName.getFullPath())) {
        return i;
      }
    }
    throw new PipeParameterNotValidException(
        String.format("column %s not found", columnName.getFullPath()));
  }

  @Override
  public String getColumnName(final int columnIndex) {
    return columnNameStringList[columnIndex];
  }

  @Override
  public List getColumnTypes() {
    return PipeDataTypeTransformer.transformToPipeDataTypeList(Arrays.asList(valueColumnTypes));
  }

  @Override
  public String getDeviceId() {
    return deviceId;
  }

  public boolean isAligned() {
    return isAligned;
  }

  /**
   * Rough per-row byte estimate: 8 bytes for the timestamp plus each non-null-typed column's
   * payload (binary length for binary types, fixed type size otherwise).
   */
  public int getCurrentRowSize() {
    int rowSize = 0;
    rowSize += 8; // timestamp
    for (int i = 0; i < valueColumnTypes.length; i++) {
      if (valueColumnTypes[i] != null) {
        if (valueColumnTypes[i].isBinary()) {
          rowSize += getBinary(i) != null ? getBinary(i).getLength() : 0;
        } else {
          rowSize += valueColumnTypes[i].getDataTypeSize();
        }
      }
    }
    return rowSize;
  }

  public IMeasurementSchema[] getMeasurementSchemaList() {
    return measurementSchemaList;
  }
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeRowCollector.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeRowCollector.java
new file mode 100644
index 0000000..c2e31d6
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/plugin/builtin/source/event/common/PipeRowCollector.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.plugin.builtin.source.event.common;
+
+import org.apache.iotdb.collector.plugin.builtin.sink.event.PipeRawTabletInsertionEvent;
+import org.apache.iotdb.collector.utils.PipeMemoryWeightUtil;
+import org.apache.iotdb.pipe.api.access.Row;
+import org.apache.iotdb.pipe.api.collector.RowCollector;
+import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
+import org.apache.iotdb.pipe.api.exception.PipeException;
+import org.apache.iotdb.pipe.api.type.Binary;
+
+import org.apache.tsfile.utils.Pair;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.IMeasurementSchema;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+public class PipeRowCollector implements RowCollector {
+
+ private final List tabletInsertionEventList = new ArrayList<>();
+ private Tablet tablet = null;
+
+ @Override
+ public void collectRow(Row row) {
+ if (!(row instanceof PipeRow)) {
+ throw new PipeException("Row can not be customized");
+ }
+
+ final PipeRow pipeRow = (PipeRow) row;
+ final IMeasurementSchema[] measurementSchemaArray = pipeRow.getMeasurementSchemaList();
+
+ // Trigger collection when a PipeResetTabletRow is encountered
+ if (row instanceof PipeResetTabletRow) {
+ collectTabletInsertionEvent();
+ }
+
+ if (tablet == null) {
+ final String deviceId = pipeRow.getDeviceId();
+ final List measurementSchemaList =
+ new ArrayList<>(Arrays.asList(measurementSchemaArray));
+ // Calculate row count and memory size of the tablet based on the first row
+ Pair rowCountAndMemorySize =
+ PipeMemoryWeightUtil.calculateTabletRowCountAndMemory(pipeRow);
+ tablet = new Tablet(deviceId, measurementSchemaList, rowCountAndMemorySize.getLeft());
+ tablet.initBitMaps();
+ }
+
+ final int rowIndex = tablet.getRowSize();
+ tablet.addTimestamp(rowIndex, row.getTime());
+ for (int i = 0; i < row.size(); i++) {
+ final Object value = row.getObject(i);
+ if (value instanceof Binary) {
+ tablet.addValue(
+ measurementSchemaArray[i].getMeasurementName(),
+ rowIndex,
+ PipeBinaryTransformer.transformToBinary((Binary) value));
+ } else {
+ tablet.addValue(measurementSchemaArray[i].getMeasurementName(), rowIndex, value);
+ }
+ if (row.isNull(i)) {
+ tablet.getBitMaps()[i].mark(rowIndex);
+ }
+ }
+
+ if (tablet.getRowSize() == tablet.getMaxRowNumber()) {
+ collectTabletInsertionEvent();
+ }
+ }
+
+ private void collectTabletInsertionEvent() {
+ if (tablet != null) {
+ // TODO: non-PipeInsertionEvent sourceEvent is not supported?
+ tabletInsertionEventList.add(new PipeRawTabletInsertionEvent(tablet, tablet.getDeviceId()));
+ }
+ this.tablet = null;
+ }
+
+ public List convertToTabletInsertionEvents(final boolean shouldReport) {
+ collectTabletInsertionEvent();
+
+ return tabletInsertionEventList;
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/PluginRuntime.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/PluginRuntime.java
index 627ace6..0b4d5a5 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/PluginRuntime.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/PluginRuntime.java
@@ -26,8 +26,8 @@
import org.apache.iotdb.collector.runtime.plugin.load.PluginClassLoaderManager;
import org.apache.iotdb.collector.runtime.plugin.meta.PluginMeta;
import org.apache.iotdb.collector.runtime.plugin.meta.PluginMetaKeeper;
-import org.apache.iotdb.collector.runtime.plugin.utils.PluginFileUtils;
import org.apache.iotdb.collector.service.PersistenceService;
+import org.apache.iotdb.collector.utils.PluginFileUtils;
import org.apache.iotdb.pipe.api.PipePlugin;
import org.apache.iotdb.pipe.api.PipeProcessor;
import org.apache.iotdb.pipe.api.PipeSink;
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/constructor/ProcessorConstructor.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/constructor/ProcessorConstructor.java
index 43620b3..1e69650 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/constructor/ProcessorConstructor.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/constructor/ProcessorConstructor.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.collector.plugin.builtin.BuiltinPlugin;
import org.apache.iotdb.collector.plugin.builtin.processor.DoNothingProcessor;
+import org.apache.iotdb.collector.plugin.builtin.processor.SubscriptionProcessor;
import org.apache.iotdb.collector.runtime.plugin.meta.PluginMetaKeeper;
import org.apache.iotdb.pipe.api.PipeProcessor;
import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
@@ -35,6 +36,8 @@ public ProcessorConstructor(PluginMetaKeeper pluginMetaKeeper) {
protected void initConstructors() {
pluginConstructors.put(
BuiltinPlugin.DO_NOTHING_PROCESSOR.getPluginName(), DoNothingProcessor::new);
+ pluginConstructors.put(
+ BuiltinPlugin.SUBSCRIPTION_PROCESSOR.getPluginName(), SubscriptionProcessor::new);
}
@Override
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/constructor/SinkConstructor.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/constructor/SinkConstructor.java
index f5dd6be..3786dd8 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/constructor/SinkConstructor.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/constructor/SinkConstructor.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.collector.plugin.builtin.BuiltinPlugin;
import org.apache.iotdb.collector.plugin.builtin.sink.DemoSink;
+import org.apache.iotdb.collector.plugin.builtin.sink.protocol.IoTDBDataRegionSyncConnector;
import org.apache.iotdb.collector.runtime.plugin.meta.PluginMetaKeeper;
import org.apache.iotdb.pipe.api.PipeSink;
import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
@@ -33,7 +34,9 @@ public SinkConstructor(PluginMetaKeeper pluginMetaKeeper) {
@Override
protected void initConstructors() {
- pluginConstructors.put(BuiltinPlugin.IOTDB_THRIFT_SINK.getPluginName(), DemoSink::new);
+ pluginConstructors.put(BuiltinPlugin.IOTDB_DEMO_SINK.getPluginName(), DemoSink::new);
+ pluginConstructors.put(
+ BuiltinPlugin.IOTDB_SYNC_SINK.getPluginName(), IoTDBDataRegionSyncConnector::new);
}
@Override
@@ -45,7 +48,7 @@ public final PipeSink reflectPlugin(PipeParameters sinkParameters) {
return (PipeSink)
reflectPluginByKey(
sinkParameters
- .getStringOrDefault("sink", BuiltinPlugin.IOTDB_THRIFT_SINK.getPluginName())
+ .getStringOrDefault("sink", BuiltinPlugin.IOTDB_DEMO_SINK.getPluginName())
.toLowerCase());
}
}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/constructor/SourceConstructor.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/constructor/SourceConstructor.java
index 2899862..7097ce5 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/constructor/SourceConstructor.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/constructor/SourceConstructor.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.collector.plugin.builtin.BuiltinPlugin;
import org.apache.iotdb.collector.plugin.builtin.source.HttpPullSource;
import org.apache.iotdb.collector.plugin.builtin.source.HttpPushSource;
+import org.apache.iotdb.collector.plugin.builtin.source.IoTDBPushSource;
import org.apache.iotdb.collector.runtime.plugin.meta.PluginMetaKeeper;
import org.apache.iotdb.pipe.api.PipeSource;
import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
@@ -38,6 +39,7 @@ public SourceConstructor(PluginMetaKeeper pluginMetaKeeper) {
protected void initConstructors() {
pluginConstructors.put(BuiltinPlugin.HTTP_PULL_SOURCE.getPluginName(), HttpPullSource::new);
pluginConstructors.put(BuiltinPlugin.HTTP_PUSH_SOURCE.getPluginName(), HttpPushSource::new);
+ pluginConstructors.put(BuiltinPlugin.SUBSCRIPTION_SOURCE.getPluginName(), IoTDBPushSource::new);
}
@Override
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/task/TaskRuntime.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/task/TaskRuntime.java
index f96a40f..a3e4204 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/task/TaskRuntime.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/task/TaskRuntime.java
@@ -82,6 +82,8 @@ public synchronized Response createTask(
.entity(String.format("Successfully created task %s", taskId))
.build();
} catch (final Exception e) {
+ tasks.remove(taskId);
+
LOGGER.warn("Failed to create task {} because {}", taskId, e.getMessage(), e);
return Response.serverError()
.entity(String.format("Failed to create task %s, because %s", taskId, e.getMessage()))
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/task/processor/ProcessorTask.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/task/processor/ProcessorTask.java
index 37fbf4e..7b98922 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/task/processor/ProcessorTask.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/task/processor/ProcessorTask.java
@@ -20,13 +20,13 @@
package org.apache.iotdb.collector.runtime.task.processor;
import org.apache.iotdb.collector.plugin.api.customizer.CollectorProcessorRuntimeConfiguration;
+import org.apache.iotdb.collector.plugin.api.event.PeriodicalEvent;
import org.apache.iotdb.collector.runtime.plugin.PluginRuntime;
import org.apache.iotdb.collector.runtime.task.Task;
import org.apache.iotdb.collector.runtime.task.event.EventCollector;
import org.apache.iotdb.collector.runtime.task.event.EventContainer;
+import org.apache.iotdb.collector.service.PeriodicalJobService;
import org.apache.iotdb.collector.service.RuntimeService;
-import org.apache.iotdb.commons.concurrent.IoTThreadFactory;
-import org.apache.iotdb.commons.concurrent.threadpool.WrappedThreadPoolExecutor;
import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator;
import com.lmax.disruptor.BlockingWaitStrategy;
@@ -39,6 +39,7 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import static org.apache.iotdb.collector.config.TaskRuntimeOptions.TASK_PROCESSOR_RING_BUFFER_SIZE;
@@ -67,14 +68,12 @@ public ProcessorTask(
REGISTERED_EXECUTOR_SERVICES.putIfAbsent(
taskId,
- new WrappedThreadPoolExecutor(
+ new ThreadPoolExecutor(
parallelism,
parallelism,
0L,
TimeUnit.SECONDS,
- new LinkedBlockingQueue<>(parallelism),
- new IoTThreadFactory(taskId), // TODO: thread name
- taskId));
+ new LinkedBlockingQueue<>(parallelism))); // TODO: thread name
disruptor =
new Disruptor<>(
@@ -121,16 +120,21 @@ public void createInternal() throws Exception {
disruptor.setDefaultExceptionHandler(new ProcessorExceptionHandler());
disruptor.start();
+
+ // Scheduled and proactive sink actions
+ PeriodicalJobService.register(taskId, () -> sinkProducer.collect(new PeriodicalEvent()));
}
@Override
public void startInternal() {
- // do nothing
+ // resume proactive sink actions
+ PeriodicalJobService.resumeSingleTask(taskId);
}
@Override
public void stopInternal() {
- // do nothing
+ // pause proactive sink actions
+ PeriodicalJobService.pauseSingleTask(taskId);
}
@Override
@@ -145,6 +149,9 @@ public void dropInternal() {
}
}
+ // remove proactive sink actions
+ PeriodicalJobService.deregister(taskId);
+
disruptor.shutdown();
final ExecutorService executorService = REGISTERED_EXECUTOR_SERVICES.remove(taskId);
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/task/sink/SinkTask.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/task/sink/SinkTask.java
index d540b92..e63ae84 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/task/sink/SinkTask.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/task/sink/SinkTask.java
@@ -25,8 +25,6 @@
import org.apache.iotdb.collector.runtime.task.event.EventCollector;
import org.apache.iotdb.collector.runtime.task.event.EventContainer;
import org.apache.iotdb.collector.service.RuntimeService;
-import org.apache.iotdb.commons.concurrent.IoTThreadFactory;
-import org.apache.iotdb.commons.concurrent.threadpool.WrappedThreadPoolExecutor;
import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator;
import com.lmax.disruptor.BlockingWaitStrategy;
@@ -39,6 +37,7 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import static org.apache.iotdb.collector.config.TaskRuntimeOptions.TASK_SINK_PARALLELISM_NUM;
@@ -59,14 +58,12 @@ public SinkTask(final String taskId, final Map attributes) {
REGISTERED_EXECUTOR_SERVICES.putIfAbsent(
taskId,
- new WrappedThreadPoolExecutor(
+ new ThreadPoolExecutor(
parallelism,
parallelism,
0L,
TimeUnit.SECONDS,
- new LinkedBlockingQueue<>(parallelism),
- new IoTThreadFactory(taskId), // TODO: thread name
- taskId));
+ new LinkedBlockingQueue<>(parallelism))); // TODO: thread name
disruptor =
new Disruptor<>(
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/task/source/pull/PullSourceTask.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/task/source/pull/PullSourceTask.java
index d426526..219dfcf 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/task/source/pull/PullSourceTask.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/task/source/pull/PullSourceTask.java
@@ -26,8 +26,6 @@
import org.apache.iotdb.collector.runtime.task.event.EventCollector;
import org.apache.iotdb.collector.runtime.task.source.SourceTask;
import org.apache.iotdb.collector.service.RuntimeService;
-import org.apache.iotdb.commons.concurrent.IoTThreadFactory;
-import org.apache.iotdb.commons.concurrent.threadpool.WrappedThreadPoolExecutor;
import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator;
import org.slf4j.Logger;
@@ -37,6 +35,7 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
public class PullSourceTask extends SourceTask {
@@ -66,14 +65,12 @@ public void createInternal() throws Exception {
REGISTERED_EXECUTOR_SERVICES.putIfAbsent(
taskId,
- new WrappedThreadPoolExecutor(
+ new ThreadPoolExecutor(
parallelism,
parallelism,
0L,
TimeUnit.SECONDS,
- new LinkedBlockingQueue<>(parallelism),
- new IoTThreadFactory(taskId), // TODO: thread name
- taskId));
+ new LinkedBlockingQueue<>(parallelism))); // TODO: thread name
final long creationTime = System.currentTimeMillis();
consumers = new PullSourceConsumer[parallelism];
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/service/PeriodicalJobService.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/service/PeriodicalJobService.java
new file mode 100644
index 0000000..f04cbed
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/service/PeriodicalJobService.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.service;
+
+import org.apache.iotdb.collector.config.PipeRuntimeOptions;
+// NOTE(review): package name "preiodical" is misspelled upstream; kept for compatibility
+import org.apache.iotdb.collector.utils.preiodical.ScheduledExecutorUtil;
+import org.apache.iotdb.collector.utils.preiodical.WrappedRunnable;
+
+import org.apache.tsfile.utils.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Drives periodical jobs registered by tasks: a single scheduler thread wakes up at a fixed
+ * interval and executes every registered job whose task is not paused.
+ */
+public class PeriodicalJobService implements IService {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(PeriodicalJobService.class);
+
+  private static final ScheduledExecutorService EXECUTOR_SERVICE =
+      new ScheduledThreadPoolExecutor(1);
+
+  // Handle of the scheduled driver task; non-null iff the service is running
+  private Future<?> executorFuture;
+
+  // Pair.left: task id
+  // Pair.right.left: task status, if task pause, skip execute
+  // Pair.right.right: periodical job
+  private static final List<Pair<String, Pair<Boolean, WrappedRunnable>>> PERIODICAL_JOBS =
+      new CopyOnWriteArrayList<>();
+
+  /** Starts the driver if not already running. Synchronized to pair with {@link #stop()}. */
+  @Override
+  public synchronized void start() {
+    if (executorFuture == null) {
+      executorFuture =
+          ScheduledExecutorUtil.safelyScheduleWithFixedDelay(
+              EXECUTOR_SERVICE,
+              this::execute,
+              PipeRuntimeOptions.EXECUTOR_CRON_HEARTBEAT_EVENT_INTERVAL_SECONDS.value(),
+              PipeRuntimeOptions.EXECUTOR_CRON_HEARTBEAT_EVENT_INTERVAL_SECONDS.value(),
+              TimeUnit.SECONDS);
+
+      LOGGER.info("Periodical Job Service started successfully.");
+    }
+  }
+
+  /** Runs every registered job whose status flag is {@code true} (i.e. not paused). */
+  protected void execute() {
+    for (final Pair<String, Pair<Boolean, WrappedRunnable>> periodicalJob : PERIODICAL_JOBS) {
+      if (periodicalJob.right.left) {
+        periodicalJob.right.right.run();
+      }
+    }
+  }
+
+  /** Registers a job under the given task id; the job starts in the resumed state. */
+  public static synchronized void register(final String taskId, final Runnable periodicalJob) {
+    PERIODICAL_JOBS.add(
+        new Pair<>(
+            taskId,
+            new Pair<>(
+                true,
+                new WrappedRunnable() {
+                  @Override
+                  public void runMayThrow() {
+                    try {
+                      periodicalJob.run();
+                    } catch (final Exception e) {
+                      // Never let one failing job kill the shared scheduler thread
+                      LOGGER.warn("Periodical job {} failed.", taskId, e);
+                    }
+                  }
+                })));
+  }
+
+  /** Removes every job registered under the given task id. */
+  public static synchronized void deregister(final String taskId) {
+    PERIODICAL_JOBS.removeIf(pair -> pair.left.equals(taskId));
+  }
+
+  /** Resumes (un-pauses) every job registered under the given task id. */
+  public static synchronized void resumeSingleTask(final String taskId) {
+    PERIODICAL_JOBS.forEach(
+        pair -> {
+          if (pair.getLeft().equals(taskId)) {
+            pair.right.left = true;
+          }
+        });
+  }
+
+  /** Pauses every job registered under the given task id; paused jobs are skipped. */
+  public static synchronized void pauseSingleTask(final String taskId) {
+    PERIODICAL_JOBS.forEach(
+        pair -> {
+          if (pair.getLeft().equals(taskId)) {
+            pair.right.left = false;
+          }
+        });
+  }
+
+  /** Stops the driver; registered jobs are kept and run again if the service restarts. */
+  @Override
+  public synchronized void stop() {
+    if (executorFuture != null) {
+      executorFuture.cancel(false);
+      executorFuture = null;
+      LOGGER.info("Periodical Job Service stopped successfully.");
+    }
+  }
+
+  @Override
+  public String name() {
+    return "PeriodicalJobService";
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/PathUtil.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/PathUtil.java
new file mode 100644
index 0000000..a0ab021
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/PathUtil.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.utils;
+
+/** Placeholder for path-related helpers; a static utility class, not instantiable. */
+public class PathUtil {
+
+  private PathUtil() {
+    // utility class: prevent instantiation
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/PipeMemoryWeightUtil.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/PipeMemoryWeightUtil.java
new file mode 100644
index 0000000..e0c086d
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/PipeMemoryWeightUtil.java
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.utils;
+
+import org.apache.iotdb.collector.plugin.builtin.source.event.common.PipeRow;
+
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.AbstractAlignedChunkMetadata;
+import org.apache.tsfile.file.metadata.ChunkMetadata;
+import org.apache.tsfile.file.metadata.IChunkMetadata;
+import org.apache.tsfile.file.metadata.IDeviceID;
+import org.apache.tsfile.read.common.BatchData;
+import org.apache.tsfile.read.common.Chunk;
+import org.apache.tsfile.read.common.Field;
+import org.apache.tsfile.read.common.RowRecord;
+import org.apache.tsfile.utils.Binary;
+import org.apache.tsfile.utils.BitMap;
+import org.apache.tsfile.utils.Pair;
+import org.apache.tsfile.utils.TsPrimitiveType;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.IMeasurementSchema;
+
+import java.util.List;
+import java.util.Map;
+
+/** Static helpers that estimate the memory footprint of pipe-related data structures. */
+public class PipeMemoryWeightUtil {
+
+  // Max size (bytes) and max row count of a single pipe tablet
+  private static final int MAX_TABLET_SIZE_IN_BYTES = 2097152;
+  private static final int MAX_TABLET_ROW_COUNT = 2048;
+
+  private PipeMemoryWeightUtil() {
+    // utility class: prevent instantiation
+  }
+
+  /** Estimates memory usage of a {@link Map}<{@link IDeviceID}, {@link Boolean}>. */
+  public static long memoryOfIDeviceId2Bool(Map<IDeviceID, Boolean> map) {
+    long usageInBytes = 0L;
+    for (Map.Entry<IDeviceID, Boolean> entry : map.entrySet()) {
+      usageInBytes = usageInBytes + entry.getKey().ramBytesUsed() + 1L;
+    }
+    return usageInBytes + 16L; // add the overhead of map
+  }
+
+  /**
+   * Given a row of a tablet, calculate the row count and memory cost of the pipe tablet that will
+   * be constructed according to config.
+   *
+   * @return left is the row count of tablet, right is the memory cost of tablet in bytes
+   */
+  public static Pair<Integer, Integer> calculateTabletRowCountAndMemory(RowRecord row) {
+    int totalSizeInBytes = 0;
+
+    // timestamp
+    totalSizeInBytes += 8L;
+
+    // values
+    final List<Field> fields = row.getFields();
+    int schemaCount = 0;
+    if (fields != null) {
+      schemaCount = fields.size();
+      for (final Field field : fields) {
+        if (field == null) {
+          continue;
+        }
+
+        final TSDataType tsDataType = field.getDataType();
+        if (tsDataType == null) {
+          continue;
+        }
+
+        if (tsDataType.isBinary()) {
+          final Binary binary = field.getBinaryV();
+          totalSizeInBytes += binary == null ? 0 : binary.getLength();
+        } else {
+          totalSizeInBytes += tsDataType.getDataTypeSize();
+        }
+      }
+    }
+
+    return calculateTabletRowCountAndMemoryBySize(totalSizeInBytes, schemaCount);
+  }
+
+  /**
+   * Given a BatchData, calculate the row count and memory cost of the pipe tablet that will be
+   * constructed according to config.
+   *
+   * @return left is the row count of tablet, right is the memory cost of tablet in bytes
+   */
+  public static Pair<Integer, Integer> calculateTabletRowCountAndMemory(BatchData batchData) {
+    int totalSizeInBytes = 0;
+    int schemaCount = 0;
+
+    // timestamp
+    totalSizeInBytes += 8L;
+
+    // values
+    final TSDataType type = batchData.getDataType();
+    if (type != null) {
+      if (type == TSDataType.VECTOR && batchData.getVector() != null) {
+        schemaCount = batchData.getVector().length;
+        for (int i = 0; i < schemaCount; ++i) {
+          final TsPrimitiveType primitiveType = batchData.getVector()[i];
+          if (primitiveType == null || primitiveType.getDataType() == null) {
+            continue;
+          }
+
+          if (primitiveType.getDataType().isBinary()) {
+            final Binary binary = primitiveType.getBinary();
+            totalSizeInBytes += binary == null ? 0 : binary.getLength();
+          } else {
+            totalSizeInBytes += primitiveType.getDataType().getDataTypeSize();
+          }
+        }
+      } else {
+        schemaCount = 1;
+        if (type.isBinary()) {
+          final Binary binary = batchData.getBinary();
+          totalSizeInBytes += binary == null ? 0 : binary.getLength();
+        } else {
+          totalSizeInBytes += type.getDataTypeSize();
+        }
+      }
+    }
+
+    return calculateTabletRowCountAndMemoryBySize(totalSizeInBytes, schemaCount);
+  }
+
+  /**
+   * Given a row of a tablet, calculate the row count and memory cost of the pipe tablet that will
+   * be constructed according to config.
+   *
+   * @return left is the row count of tablet, right is the memory cost of tablet in bytes
+   */
+  public static Pair<Integer, Integer> calculateTabletRowCountAndMemory(PipeRow row) {
+    return calculateTabletRowCountAndMemoryBySize(row.getCurrentRowSize(), row.size());
+  }
+
+  private static Pair<Integer, Integer> calculateTabletRowCountAndMemoryBySize(
+      int rowSize, int schemaCount) {
+    if (rowSize <= 0) {
+      return new Pair<>(1, 0);
+    }
+
+    // Calculate row number according to the max size of a pipe tablet.
+    // "-100" is the estimated size of other data structures in a pipe tablet.
+    // "*8" converts bytes to bits, because the bitmap size is 1 bit per schema.
+    int rowNumber = 8 * (MAX_TABLET_SIZE_IN_BYTES - 100) / (8 * rowSize + schemaCount);
+    rowNumber = Math.max(1, rowNumber);
+
+    if (rowNumber > MAX_TABLET_ROW_COUNT) {
+      // Bound the row number, the memory cost is rowSize * rowNumber
+      return new Pair<>(MAX_TABLET_ROW_COUNT, rowSize * MAX_TABLET_ROW_COUNT);
+    } else {
+      return new Pair<>(rowNumber, MAX_TABLET_SIZE_IN_BYTES);
+    }
+  }
+
+  /** Estimates the total heap footprint (in bytes) of the given tablet. */
+  public static long calculateTabletSizeInBytes(Tablet tablet) {
+    long totalSizeInBytes = 0;
+
+    if (tablet == null) {
+      return totalSizeInBytes;
+    }
+
+    long[] timestamps = tablet.getTimestamps();
+    Object[] tabletValues = tablet.getValues();
+
+    // timestamps
+    if (timestamps != null) {
+      totalSizeInBytes += timestamps.length * 8L;
+    }
+
+    // values
+    final List<IMeasurementSchema> timeseries = tablet.getSchemas();
+    if (timeseries != null) {
+      for (int column = 0; column < timeseries.size(); column++) {
+        final IMeasurementSchema measurementSchema = timeseries.get(column);
+        if (measurementSchema == null) {
+          continue;
+        }
+
+        final TSDataType tsDataType = measurementSchema.getType();
+        if (tsDataType == null) {
+          continue;
+        }
+
+        if (tsDataType.isBinary()) {
+          if (tabletValues == null || tabletValues.length <= column) {
+            continue;
+          }
+          final Binary[] values = ((Binary[]) tabletValues[column]);
+          if (values == null) {
+            continue;
+          }
+          for (Binary value : values) {
+            totalSizeInBytes +=
+                value == null ? 0 : (value.getLength() == -1 ? 0 : value.getLength());
+          }
+        } else if (timestamps != null) {
+          // guard: timestamps may be null even when schemas are present
+          totalSizeInBytes += (long) timestamps.length * tsDataType.getDataTypeSize();
+        }
+      }
+    }
+
+    // bitMaps
+    BitMap[] bitMaps = tablet.getBitMaps();
+    if (bitMaps != null) {
+      for (int i = 0; i < bitMaps.length; i++) {
+        totalSizeInBytes += bitMaps[i] == null ? 0 : bitMaps[i].getSize();
+      }
+    }
+
+    // estimate other dataStructures size
+    totalSizeInBytes += 100;
+
+    return totalSizeInBytes;
+  }
+
+  /** Estimates the RAM usage (in bytes) of the given BatchData across all of its rows. */
+  public static int calculateBatchDataRamBytesUsed(BatchData batchData) {
+    int totalSizeInBytes = 0;
+
+    // timestamp
+    totalSizeInBytes += 8;
+
+    // values
+    final TSDataType type = batchData.getDataType();
+    if (type != null) {
+      if (type == TSDataType.VECTOR && batchData.getVector() != null) {
+        for (int i = 0; i < batchData.getVector().length; ++i) {
+          final TsPrimitiveType primitiveType = batchData.getVector()[i];
+          if (primitiveType == null || primitiveType.getDataType() == null) {
+            continue;
+          }
+          // consider variable references (plus 8) and memory alignment (round up to 8)
+          totalSizeInBytes += roundUpToMultiple(primitiveType.getSize() + 8, 8);
+        }
+      } else {
+        if (type.isBinary()) {
+          final Binary binary = batchData.getBinary();
+          // refer to org.apache.tsfile.utils.TsPrimitiveType.TsBinary.getSize
+          totalSizeInBytes +=
+              roundUpToMultiple((binary == null ? 8 : binary.getLength() + 8) + 8, 8);
+        } else {
+          totalSizeInBytes += roundUpToMultiple(TsPrimitiveType.getByType(type).getSize() + 8, 8);
+        }
+      }
+    }
+
+    return batchData.length() * totalSizeInBytes;
+  }
+
+  /** Returns the retained size of the given chunk, or 0 if it is null. */
+  public static long calculateChunkRamBytesUsed(Chunk chunk) {
+    return chunk != null ? chunk.getRetainedSizeInBytes() : 0L;
+  }
+
+  /** Sums the retained sizes of the time chunk metadata and all value chunk metadata. */
+  public static long calculateAlignedChunkMetaBytesUsed(
+      AbstractAlignedChunkMetadata alignedChunkMetadata) {
+    if (alignedChunkMetadata == null) {
+      return 0L;
+    }
+
+    final ChunkMetadata timeChunkMetadata =
+        (ChunkMetadata) alignedChunkMetadata.getTimeChunkMetadata();
+    final List<IChunkMetadata> valueChunkMetadataList =
+        alignedChunkMetadata.getValueChunkMetadataList();
+
+    long size = timeChunkMetadata != null ? timeChunkMetadata.getRetainedSizeInBytes() : 0;
+    if (valueChunkMetadataList != null && !valueChunkMetadataList.isEmpty()) {
+      for (IChunkMetadata valueChunkMetadata : valueChunkMetadataList) {
+        if (valueChunkMetadata != null) {
+          size += ((ChunkMetadata) valueChunkMetadata).getRetainedSizeInBytes();
+        }
+      }
+    }
+
+    return size;
+  }
+
+  /**
+   * Rounds up the given integer num to the nearest multiple of n.
+   *
+   * @param num The integer to be rounded up.
+   * @param n The specified multiple, must be positive.
+   * @return The nearest multiple of n greater than or equal to num.
+   */
+  private static int roundUpToMultiple(int num, int n) {
+    if (n <= 0) {
+      throw new IllegalArgumentException("The multiple n must be greater than 0");
+    }
+    // Calculate the rounded up value to the nearest multiple of n
+    return ((num + n - 1) / n) * n;
+  }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/utils/PluginFileUtils.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/PluginFileUtils.java
similarity index 98%
rename from iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/utils/PluginFileUtils.java
rename to iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/PluginFileUtils.java
index ebc693d..c343942 100644
--- a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/runtime/plugin/utils/PluginFileUtils.java
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/PluginFileUtils.java
@@ -17,7 +17,7 @@
* under the License.
*/
-package org.apache.iotdb.collector.runtime.plugin.utils;
+package org.apache.iotdb.collector.utils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.FilenameUtils;
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/builder/PipeTableModeTsFileBuilder.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/builder/PipeTableModeTsFileBuilder.java
new file mode 100644
index 0000000..98192d3
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/builder/PipeTableModeTsFileBuilder.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.utils.builder;
+
+import org.apache.iotdb.pipe.api.exception.PipeException;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.tsfile.exception.write.WriteProcessException;
+import org.apache.tsfile.file.metadata.IDeviceID;
+import org.apache.tsfile.file.metadata.TableSchema;
+import org.apache.tsfile.utils.Pair;
+import org.apache.tsfile.utils.WriteUtils;
+import org.apache.tsfile.write.record.Tablet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class PipeTableModeTsFileBuilder extends PipeTsFileBuilder {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(PipeTableModeTsFileBuilder.class);
+
+ private final Map> dataBase2TabletList = new HashMap<>();
+
+ public PipeTableModeTsFileBuilder(AtomicLong currentBatchId, AtomicLong tsFileIdGenerator) {
+ super(currentBatchId, tsFileIdGenerator);
+ }
+
+ @Override
+ public void bufferTableModelTablet(String dataBase, Tablet tablet) {
+ dataBase2TabletList.computeIfAbsent(dataBase, db -> new ArrayList<>()).add(tablet);
+ }
+
+ @Override
+ public void bufferTreeModelTablet(Tablet tablet, Boolean isAligned) {
+ throw new UnsupportedOperationException(
+ "PipeTableModeTsFileBuilder does not support tree model tablet to build TSFile");
+ }
+
+ @Override
+ public List> convertTabletToTsFileWithDBInfo() throws IOException {
+ if (dataBase2TabletList.isEmpty()) {
+ return new ArrayList<>(0);
+ }
+ List> pairList = new ArrayList<>();
+ for (Map.Entry> entry : dataBase2TabletList.entrySet()) {
+ final LinkedHashSet>>>> linkedHashSet =
+ new LinkedHashSet<>();
+ pairList.addAll(
+ writeTableModelTabletsToTsFiles(entry.getValue(), entry.getKey(), linkedHashSet));
+ }
+ return pairList;
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return dataBase2TabletList.isEmpty();
+ }
+
+ @Override
+ public synchronized void onSuccess() {
+ super.onSuccess();
+ dataBase2TabletList.clear();
+ }
+
+ @Override
+ public synchronized void close() {
+ super.close();
+ dataBase2TabletList.clear();
+ }
+
+ private >>>
+ List> writeTableModelTabletsToTsFiles(
+ final List tabletList,
+ final String dataBase,
+ LinkedHashSet> linkedHashSet)
+ throws IOException {
+
+ final Map> tableName2Tablets = new HashMap<>();
+
+ // Sort the tablets by dataBaseName
+ for (final Tablet tablet : tabletList) {
+ tableName2Tablets
+ .computeIfAbsent(tablet.getTableName(), k -> new ArrayList<>())
+ .add((T) new Pair<>(tablet, WriteUtils.splitTabletByDevice(tablet)));
+ }
+
+ // Replace ArrayList with LinkedList to improve performance
+ final LinkedHashSet> table2Tablets = new LinkedHashSet<>();
+
+ // Sort the tablets by start time in first device
+ for (final List tablets : tableName2Tablets.values()) {
+ tablets.sort(
+ (o1, o2) -> {
+ final IDeviceID deviceID = o1.right.get(0).left;
+ final int result;
+ if ((result = deviceID.compareTo(o2.right.get(0).left)) == 0) {
+ return Long.compare(o1.left.getTimestamp(0), o2.left.getTimestamp(0));
+ }
+ return result;
+ });
+ }
+
+ // Sort the tables by table name
+ tableName2Tablets.entrySet().stream()
+ .sorted(Map.Entry.comparingByKey(Comparator.naturalOrder()))
+ .forEach(entry -> linkedHashSet.add(new LinkedList<>(entry.getValue())));
+
+ // Help GC
+ tableName2Tablets.clear();
+
+ final List> sealedFiles = new ArrayList<>();
+
+ // Try making the tsfile size as large as possible
+ while (!linkedHashSet.isEmpty()) {
+ if (Objects.isNull(fileWriter)) {
+ createFileWriter();
+ }
+
+ try {
+ tryBestToWriteTabletsIntoOneFile(linkedHashSet);
+ } catch (final Exception e) {
+ LOGGER.warn(
+ "Batch id = {}: Failed to write tablets into tsfile, because {}",
+ currentBatchId.get(),
+ e.getMessage(),
+ e);
+
+ try {
+ fileWriter.close();
+ } catch (final Exception closeException) {
+ LOGGER.warn(
+ "Batch id = {}: Failed to close the tsfile {} after failed to write tablets into, because {}",
+ currentBatchId.get(),
+ fileWriter.getIOWriter().getFile().getPath(),
+ closeException.getMessage(),
+ closeException);
+ } finally {
+ // Add current writing file to the list and delete the file
+ sealedFiles.add(new Pair<>(dataBase, fileWriter.getIOWriter().getFile()));
+ }
+
+ for (final Pair sealedFile : sealedFiles) {
+ final boolean deleteSuccess = FileUtils.deleteQuietly(sealedFile.right);
+ LOGGER.warn(
+ "Batch id = {}: {} delete the tsfile {} after failed to write tablets into {}. {}",
+ currentBatchId.get(),
+ deleteSuccess ? "Successfully" : "Failed to",
+ sealedFile.right.getPath(),
+ fileWriter.getIOWriter().getFile().getPath(),
+ deleteSuccess ? "" : "Maybe the tsfile needs to be deleted manually.");
+ }
+ sealedFiles.clear();
+
+ fileWriter = null;
+
+ throw e;
+ }
+
+ fileWriter.close();
+ final File sealedFile = fileWriter.getIOWriter().getFile();
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(
+ "Batch id = {}: Seal tsfile {} successfully.",
+ currentBatchId.get(),
+ sealedFile.getPath());
+ }
+ sealedFiles.add(new Pair<>(dataBase, sealedFile));
+ fileWriter = null;
+ }
+
+ return sealedFiles;
+ }
+
+ private >>>
+ void tryBestToWriteTabletsIntoOneFile(
+ final LinkedHashSet> device2TabletsLinkedList) throws IOException {
+ final Iterator> iterator = device2TabletsLinkedList.iterator();
+
+ while (iterator.hasNext()) {
+ final LinkedList tablets = iterator.next();
+
+ final List tabletsToWrite = new ArrayList<>();
+ final Map deviceLastTimestampMap = new HashMap<>();
+ while (!tablets.isEmpty()) {
+ final T pair = tablets.peekFirst();
+ if (timestampsAreNonOverlapping(
+ (Pair>>) pair, deviceLastTimestampMap)) {
+ tabletsToWrite.add(pair);
+ tablets.pollFirst();
+ continue;
+ }
+ break;
+ }
+
+ if (tablets.isEmpty()) {
+ iterator.remove();
+ }
+ boolean schemaNotRegistered = true;
+ for (final Pair>> pair : tabletsToWrite) {
+ final Tablet tablet = pair.left;
+ if (schemaNotRegistered) {
+ fileWriter.registerTableSchema(
+ new TableSchema(tablet.getTableName(), tablet.getSchemas(), tablet.getColumnTypes()));
+ schemaNotRegistered = false;
+ }
+ try {
+ fileWriter.writeTable(tablet, pair.right);
+ } catch (WriteProcessException e) {
+ LOGGER.warn(
+ "Batch id = {}: Failed to build the table model TSFile. Please check whether the written Tablet has time overlap and whether the Table Schema is correct.",
+ currentBatchId.get(),
+ e);
+ throw new PipeException(
+ "The written Tablet time may overlap or the Schema may be incorrect");
+ }
+ }
+ }
+ }
+
+ /**
+ * A Map is used to record the maximum time each {@link IDeviceID} is written. {@link Pair}
+ * records the Index+1 of the maximum timestamp of IDevice in each {@link Tablet}.
+ *
+ * @return If false, the tablet overlaps with the previous tablet; if true, there is no time
+ * overlap.
+ */
+ private >>>
+ boolean timestampsAreNonOverlapping(
+ final T tabletPair, final Map deviceLastTimestampMap) {
+ int currentTimestampIndex = 0;
+ for (Pair deviceTimestampIndexPair : tabletPair.right) {
+ final Long lastDeviceTimestamp = deviceLastTimestampMap.get(deviceTimestampIndexPair.left);
+ if (lastDeviceTimestamp != null
+ && lastDeviceTimestamp >= tabletPair.left.getTimestamp(currentTimestampIndex)) {
+ return false;
+ }
+ currentTimestampIndex = deviceTimestampIndexPair.right;
+ deviceLastTimestampMap.put(
+ deviceTimestampIndexPair.left, tabletPair.left.getTimestamp(currentTimestampIndex - 1));
+ }
+
+ return true;
+ }
+}
diff --git a/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/builder/PipeTreeModelTsFileBuilder.java b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/builder/PipeTreeModelTsFileBuilder.java
new file mode 100644
index 0000000..b47f1bd
--- /dev/null
+++ b/iotdb-collector/collector-core/src/main/java/org/apache/iotdb/collector/utils/builder/PipeTreeModelTsFileBuilder.java
@@ -0,0 +1,268 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.collector.utils.builder;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.tsfile.exception.write.WriteProcessException;
+import org.apache.tsfile.file.metadata.IDeviceID;
+import org.apache.tsfile.read.common.Path;
+import org.apache.tsfile.utils.Pair;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.IMeasurementSchema;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class PipeTreeModelTsFileBuilder extends PipeTsFileBuilder {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(PipeTreeModelTsFileBuilder.class);
+
+ private final List tabletList = new ArrayList<>();
+ private final List isTabletAlignedList = new ArrayList<>();
+
+ public PipeTreeModelTsFileBuilder(
+ final AtomicLong currentBatchId, final AtomicLong tsFileIdGenerator) {
+ super(currentBatchId, tsFileIdGenerator);
+ }
+
  /**
   * Unsupported: this builder only accepts tree-model tablets.
   *
   * @throws UnsupportedOperationException always
   */
  @Override
  public void bufferTableModelTablet(final String dataBase, final Tablet tablet) {
    throw new UnsupportedOperationException(
        "PipeTreeModelTsFileBuilder does not support table model tablet to build TSFile");
  }
+
  /**
   * Buffers one tree-model tablet together with its aligned flag.
   *
   * <p>The two lists are kept in lockstep: the flag appended here describes the tablet appended
   * at the same index.
   *
   * @param tablet the tablet to buffer
   * @param isAligned whether the tablet's device is aligned
   */
  @Override
  public void bufferTreeModelTablet(final Tablet tablet, final Boolean isAligned) {
    tabletList.add(tablet);
    isTabletAlignedList.add(isAligned);
  }
+
+ @Override
+ public List> convertTabletToTsFileWithDBInfo()
+ throws IOException, WriteProcessException {
+ return writeTabletsToTsFiles();
+ }
+
  /** @return {@code true} iff no tree-model tablets are currently buffered */
  @Override
  public boolean isEmpty() {
    return tabletList.isEmpty();
  }
+
+ @Override
+ public void onSuccess() {
+ super.onSuccess();
+ tabletList.clear();
+ isTabletAlignedList.clear();
+ }
+
  /**
   * Releases builder resources and drops any buffered tablets.
   *
   * <p>Synchronized so concurrent closers do not race on the shared lists.
   */
  @Override
  public synchronized void close() {
    super.close();
    tabletList.clear();
    isTabletAlignedList.clear();
  }
+
+ private List> writeTabletsToTsFiles()
+ throws IOException, WriteProcessException {
+ final Map> device2Tablets = new HashMap<>();
+ final Map device2Aligned = new HashMap<>();
+
+ // Sort the tablets by device id
+ for (int i = 0, size = tabletList.size(); i < size; ++i) {
+ final Tablet tablet = tabletList.get(i);
+ final String deviceId = tablet.getDeviceId();
+ device2Tablets.computeIfAbsent(deviceId, k -> new ArrayList<>()).add(tablet);
+ device2Aligned.put(deviceId, isTabletAlignedList.get(i));
+ }
+
+ // Sort the tablets by start time in each device
+ for (final List tablets : device2Tablets.values()) {
+ tablets.sort(
+ // Each tablet has at least one timestamp
+ Comparator.comparingLong(tablet -> tablet.getTimestamp(0)));
+ }
+
+ // Sort the devices by device id
+ final List devices = new ArrayList<>(device2Tablets.keySet());
+ devices.sort(Comparator.naturalOrder());
+
+ // Replace ArrayList with LinkedList to improve performance
+ final LinkedHashMap> device2TabletsLinkedList =
+ new LinkedHashMap<>();
+ for (final String device : devices) {
+ device2TabletsLinkedList.put(device, new LinkedList<>(device2Tablets.get(device)));
+ }
+
+ // Help GC
+ devices.clear();
+ device2Tablets.clear();
+
+ // Write the tablets to the tsfile device by device, and the tablets
+ // in the same device are written in order of start time. Tablets in
+ // the same device should not be written if their time ranges overlap.
+ // If overlapped, we try to write the tablets whose device id is not
+ // the same as the previous one. For the tablets not written in the
+ // previous round, we write them in a new tsfile.
+ final List> sealedFiles = new ArrayList<>();
+
+ // Try making the tsfile size as large as possible
+ while (!device2TabletsLinkedList.isEmpty()) {
+ if (Objects.isNull(fileWriter)) {
+ createFileWriter();
+ }
+ try {
+ tryBestToWriteTabletsIntoOneFile(device2TabletsLinkedList, device2Aligned);
+ } catch (final Exception e) {
+ LOGGER.warn(
+ "Batch id = {}: Failed to write tablets into tsfile, because {}",
+ currentBatchId.get(),
+ e.getMessage(),
+ e);
+
+ try {
+ fileWriter.close();
+ } catch (final Exception closeException) {
+ LOGGER.warn(
+ "Batch id = {}: Failed to close the tsfile {} after failed to write tablets into, because {}",
+ currentBatchId.get(),
+ fileWriter.getIOWriter().getFile().getPath(),
+ closeException.getMessage(),
+ closeException);
+ } finally {
+ // Add current writing file to the list and delete the file
+ sealedFiles.add(new Pair<>(null, fileWriter.getIOWriter().getFile()));
+ }
+
+ for (final Pair sealedFile : sealedFiles) {
+ final boolean deleteSuccess = FileUtils.deleteQuietly(sealedFile.right);
+ LOGGER.warn(
+ "Batch id = {}: {} delete the tsfile {} after failed to write tablets into {}. {}",
+ currentBatchId.get(),
+ deleteSuccess ? "Successfully" : "Failed to",
+ sealedFile.right.getPath(),
+ fileWriter.getIOWriter().getFile().getPath(),
+ deleteSuccess ? "" : "Maybe the tsfile needs to be deleted manually.");
+ }
+ sealedFiles.clear();
+
+ fileWriter = null;
+
+ throw e;
+ }
+
+ fileWriter.close();
+ final File sealedFile = fileWriter.getIOWriter().getFile();
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(
+ "Batch id = {}: Seal tsfile {} successfully.",
+ currentBatchId.get(),
+ sealedFile.getPath());
+ }
+ sealedFiles.add(new Pair<>(null, sealedFile));
+ fileWriter = null;
+ }
+
+ return sealedFiles;
+ }
+
+ private void tryBestToWriteTabletsIntoOneFile(
+ final LinkedHashMap