diff --git a/.github/workflows/client.yml b/.github/workflows/client-cpp.yml
similarity index 98%
rename from .github/workflows/client.yml
rename to .github/workflows/client-cpp.yml
index 39720f6b5df10..46080199ec01e 100644
--- a/.github/workflows/client.yml
+++ b/.github/workflows/client-cpp.yml
@@ -3,7 +3,7 @@
# CPP compiling is too slow, so let's do it in parallel with testing other modules.
# As there is no Java client, we just use one JDK.
-name: Clients_except_Java CI with Maven
+name: C++ Client
on:
push:
diff --git a/.github/workflows/client-python.yml b/.github/workflows/client-python.yml
new file mode 100644
index 0000000000000..230f9a29c5eb3
--- /dev/null
+++ b/.github/workflows/client-python.yml
@@ -0,0 +1,63 @@
+# This workflow is just for checking whether modifications work for the Python client.
+
+name: Python Client
+
+on:
+ push:
+ branches:
+ - master
+ - 'rel/*'
+ - "new_*"
+ paths-ignore:
+ - 'docs/**'
+ pull_request:
+ branches:
+ - master
+ - 'rel/*'
+ - "new_*"
+ paths-ignore:
+ - 'docs/**'
+ # allow manually run the action:
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+env:
+ MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
+
+jobs:
+ unix:
+ strategy:
+ fail-fast: false
+ max-parallel: 20
+ matrix:
+ java: [ 11 ]
+ os: [ ubuntu-latest ]
+ runs-on: ${{ matrix.os}}
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up JDK ${{ matrix.java }}
+ uses: actions/setup-java@v1
+ with:
+ java-version: ${{ matrix.java }}
+ - name: Cache Maven packages
+ uses: actions/cache@v2
+ with:
+ path: ~/.m2
+ key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
+ restore-keys: ${{ runner.os }}-m2-
+ - name: Build IoTDB server distribution zip and python client
+ run: mvn -B clean install -pl distribution,client-py -am -DskipTests
+ - name: Build IoTDB server docker image
+ run: |
+ docker build . -f docker/src/main/Dockerfile-single -t "iotdb:dev"
+ docker images
+ - name: Install IoTDB python client requirements
+ run: pip3 install -r client-py/requirements_dev.txt
+ - name: Integration test
+ shell: bash
+ run: |
+ cd client-py && pytest .
\ No newline at end of file
diff --git a/.github/workflows/grafana-plugin.yml b/.github/workflows/grafana-plugin.yml
index a84108110bd22..fc8564c205250 100644
--- a/.github/workflows/grafana-plugin.yml
+++ b/.github/workflows/grafana-plugin.yml
@@ -18,7 +18,7 @@ jobs:
- name: Setup Node.js environment
uses: actions/setup-node@v2.1.5
with:
- node-version: "12.x"
+ node-version: "14.x"
- name: Get yarn cache directory path
id: yarn-cache-dir-path
diff --git a/.github/workflows/sonar-coveralls.yml b/.github/workflows/sonar-coveralls.yml
index f646ce5bab4f2..d832dd0bd2725 100644
--- a/.github/workflows/sonar-coveralls.yml
+++ b/.github/workflows/sonar-coveralls.yml
@@ -18,13 +18,6 @@ on:
- "new_*"
paths-ignore:
- "docs/**"
- pull_request_target:
- branches:
- - master
- - "rel/*"
- - "new_*"
- paths-ignore:
- - "docs/**"
# allow manually run the action:
workflow_dispatch:
diff --git a/.gitignore b/.gitignore
index 68d7afef18650..6b90b2f1f5c66 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,6 +40,7 @@ tsfile-jdbc/src/main/resources/output/queryRes.csv
*.gz
*.tar.gz
*.tar
+*.tokens
#src/test/resources/logback.xml
### CSV ###
diff --git a/LICENSE b/LICENSE
index d84f2bd3545d0..6c45bf8f5d7ce 100644
--- a/LICENSE
+++ b/LICENSE
@@ -234,3 +234,43 @@ The following files include code modified from Michael Burman's gorilla-tsc proj
Copyright: 2016-2018 Michael Burman and/or other contributors
Project page: https://github.com/burmanm/gorilla-tsc
License: http://www.apache.org/licenses/LICENSE-2.0
+
+--------------------------------------------------------------------------------
+
+The following files include code modified from Apache HBase project.
+
+./confignode/src/main/java/org/apache/iotdb/procedure/Procedure.java
+./confignode/src/main/java/org/apache/iotdb/procedure/ProcedureExecutor.java
+./confignode/src/main/java/org/apache/iotdb/procedure/StateMachineProcedure.java
+./confignode/src/main/java/org/apache/iotdb/procedure/TimeoutExecutorThread.java
+./confignode/src/main/java/org/apache/iotdb/procedure/StoppableThread.java
+
+Copyright: The Apache Software Foundation
+Project page: https://hbase.apache.org/
+License: http://www.apache.org/licenses/LICENSE-2.0
+
+--------------------------------------------------------------------------------
+
+The following files include code modified from Eclipse Collections project.
+
+./tsfile/src/main/java/org/apache/iotdb/tsfile/utils/ByteArrayList.java
+
+Copyright: 2021 Goldman Sachs
+Project page: https://www.eclipse.org/collections
+License: https://github.com/eclipse/eclipse-collections/blob/master/LICENSE-EDL-1.0.txt
+
+--------------------------------------------------------------------------------
+
+The following files include code modified from Micrometer project.
+
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmClassLoaderMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmCompileMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmGcMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmMemoryMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmThreadMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/logback/LogbackMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/utils/JvmUtils
+
+Copyright: 2017 VMware
+Project page: https://github.com/micrometer-metrics/micrometer
+License: https://github.com/micrometer-metrics/micrometer/blob/main/LICENSE
\ No newline at end of file
diff --git a/antlr/pom.xml b/antlr/pom.xml
index b9d8c10386825..83a5efbc2b508 100644
--- a/antlr/pom.xml
+++ b/antlr/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmliotdb-antlr
diff --git a/cli/pom.xml b/cli/pom.xml
index 741ff6d7059ae..c84e2a6bdfb99 100644
--- a/cli/pom.xml
+++ b/cli/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmliotdb-cli
diff --git a/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java b/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java
index 02e4c7ca51fe7..14374603173af 100644
--- a/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java
+++ b/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java
@@ -607,7 +607,11 @@ private static void writeAndEmptyDataSet(
List> measurementsList,
int retryTime) {
try {
- session.insertAlignedRecords(deviceIds, times, measurementsList, typesList, valuesList);
+ if (!aligned) {
+ session.insertRecords(deviceIds, times, measurementsList, typesList, valuesList);
+ } else {
+ session.insertAlignedRecords(deviceIds, times, measurementsList, typesList, valuesList);
+ }
} catch (IoTDBConnectionException e) {
if (retryTime > 0) {
try {
diff --git a/client-cpp/pom.xml b/client-cpp/pom.xml
index 381ff59cc2e5b..999f494997ed3 100644
--- a/client-cpp/pom.xml
+++ b/client-cpp/pom.xml
@@ -24,7 +24,7 @@
iotdb-parentorg.apache.iotdb
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlclient-cpp
diff --git a/client-py/README.md b/client-py/README.md
index d5880af36cd5c..41c0a113b8f17 100644
--- a/client-py/README.md
+++ b/client-py/README.md
@@ -39,60 +39,242 @@ architecture, high performance and rich feature set together with its deep integ
Apache Hadoop, Spark and Flink, Apache IoTDB can meet the requirements of massive data storage,
high-speed data ingestion and complex data analysis in the IoT industrial fields.
+## Python Native API
-# Apache IoTDB Python Client API
+### Requirements
-Using the package, you can write data to IoTDB, read data from IoTDB and maintain the schema of IoTDB.
+You have to install thrift (>=0.13) before using the package.
-## Requirements
-You have to install thrift (>=0.13) before using the package.
-## How to use (Example)
+### How to use (Example)
+
+First, download the latest package: `pip3 install apache-iotdb`
-First, download the package: `pip3 install apache-iotdb`
+*Notice: If you are installing Python API v0.13.0, DO NOT install by `pip install apache-iotdb==0.13.0`, use `pip install apache-iotdb==0.13.0.post1` instead!*
-You can get an example of using the package to read and write data at here: [Example](https://github.com/apache/iotdb/blob/rel/0.11/client-py/src/SessionExample.py)
+You can get an example of using the package to read and write data at here: [Example](https://github.com/apache/iotdb/blob/master/client-py/SessionExample.py)
+
+An example of aligned timeseries: [Aligned Timeseries Session Example](https://github.com/apache/iotdb/blob/master/client-py/SessionAlignedTimeseriesExample.py)
(you need to add `import iotdb` in the head of the file)
Or:
```python
-
from iotdb.Session import Session
ip = "127.0.0.1"
port_ = "6667"
-username_ = 'root'
-password_ = 'root'
+username_ = "root"
+password_ = "root"
session = Session(ip, port_, username_, password_)
session.open(False)
zone = session.get_time_zone()
session.close()
+```
+
+### Initialization
+
+* Initialize a Session
+
+```python
+session = Session(ip, port_, username_, password_, fetch_size=1024, zone_id="UTC+8")
+```
+
+* Open a session, with a parameter to specify whether to enable RPC compression
+```python
+session.open(enable_rpc_compression=False)
```
-## IoTDB Testcontainer
+Notice: the RPC compression status of the client must comply with that of the IoTDB server
-The Test Support is based on the lib `testcontainers` (https://testcontainers-python.readthedocs.io/en/latest/index.html) which you need to install in your project if you want to use the feature.
+* Close a Session
-To start (and stop) an IoTDB Database in a Docker container simply do:
+```python
+session.close()
```
-class MyTestCase(unittest.TestCase):
- def test_something(self):
- with IoTDBContainer() as c:
- session = Session('localhost', c.get_exposed_port(6667), 'root', 'root')
- session.open(False)
- result = session.execute_query_statement("SHOW TIMESERIES")
- print(result)
- session.close()
+### Data Definition Interface (DDL Interface)
+
+#### Storage Group Management
+
+* Set storage group
+
+```python
+session.set_storage_group(group_name)
```
-by default it will load the image `apache/iotdb:latest`, if you want a specific version just pass it like e.g. `IoTDBContainer("apache/iotdb:0.12.0")` to get version `0.12.0` running.
+* Delete one or several storage groups
+
+```python
+session.delete_storage_group(group_name)
+session.delete_storage_groups(group_name_lst)
+```
+#### Timeseries Management
+
+* Create one or multiple timeseries
+
+```python
+session.create_time_series(ts_path, data_type, encoding, compressor,
+ props=None, tags=None, attributes=None, alias=None)
+
+session.create_multi_time_series(
+ ts_path_lst, data_type_lst, encoding_lst, compressor_lst,
+ props_lst=None, tags_lst=None, attributes_lst=None, alias_lst=None
+)
+```
-## Pandas Support
+* Create aligned timeseries
+
+```python
+session.create_aligned_time_series(
+ device_id, measurements_lst, data_type_lst, encoding_lst, compressor_lst
+)
+```
+
+Attention: Alias of measurements are **not supported** currently.
+
+* Delete one or several timeseries
+
+```python
+session.delete_time_series(paths_list)
+```
+
+* Check whether the specific timeseries exists
+
+```python
+session.check_time_series_exists(path)
+```
+
+### Data Manipulation Interface (DML Interface)
+
+#### Insert
+
+It is recommended to use insertTablet to help improve write efficiency.
+
+* Insert a Tablet, which is multiple rows of a device; each row has the same measurements
+ * **Better Write Performance**
+ * **Support null values**: fill the null value with any value, and then mark the null value via BitMap (from v0.13)
+
+
+We have two implementations of Tablet in Python API.
+
+* Normal Tablet
+
+```python
+values_ = [
+ [False, 10, 11, 1.1, 10011.1, "test01"],
+ [True, 100, 11111, 1.25, 101.0, "test02"],
+ [False, 100, 1, 188.1, 688.25, "test03"],
+ [True, 0, 0, 0, 6.25, "test04"],
+]
+timestamps_ = [1, 2, 3, 4]
+tablet_ = Tablet(
+ device_id, measurements_, data_types_, values_, timestamps_
+)
+session.insert_tablet(tablet_)
+```
+* Numpy Tablet
+
+Comparing with Tablet, Numpy Tablet is using [numpy.ndarray](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) to record data.
+With less memory footprint and time cost of serialization, the insert performance will be better.
+
+**Notice**
+1. time and value columns in Tablet are ndarray.
+2. recommended to use the specific dtypes to each ndarray, see the example below
+(if not, the default dtypes are also ok).
+
+```python
+data_types_ = [
+ TSDataType.BOOLEAN,
+ TSDataType.INT32,
+ TSDataType.INT64,
+ TSDataType.FLOAT,
+ TSDataType.DOUBLE,
+ TSDataType.TEXT,
+]
+np_values_ = [
+ np.array([False, True, False, True], TSDataType.BOOLEAN.np_dtype()),
+ np.array([10, 100, 100, 0], TSDataType.INT32.np_dtype()),
+ np.array([11, 11111, 1, 0], TSDataType.INT64.np_dtype()),
+ np.array([1.1, 1.25, 188.1, 0], TSDataType.FLOAT.np_dtype()),
+ np.array([10011.1, 101.0, 688.25, 6.25], TSDataType.DOUBLE.np_dtype()),
+ np.array(["test01", "test02", "test03", "test04"], TSDataType.TEXT.np_dtype()),
+]
+np_timestamps_ = np.array([1, 2, 3, 4], TSDataType.INT64.np_dtype())
+np_tablet_ = NumpyTablet(
+ "root.sg_test_01.d_02", measurements_, data_types_, np_values_, np_timestamps_
+)
+session.insert_tablet(np_tablet_)
+```
+
+* Insert multiple Tablets
+
+```python
+session.insert_tablets(tablet_lst)
+```
+
+* Insert a Record
+
+```python
+session.insert_record(device_id, timestamp, measurements_, data_types_, values_)
+```
+
+* Insert multiple Records
+
+```python
+session.insert_records(
+ device_ids_, time_list_, measurements_list_, data_type_list_, values_list_
+)
+```
+
+* Insert multiple Records that belong to the same device.
+ With type info the server has no need to do type inference, which leads to better performance
+
+
+```python
+session.insert_records_of_one_device(device_id, time_list, measurements_list, data_types_list, values_list)
+```
+
+#### Insert with type inference
+
+When the data is of String type, we can use the following interface to perform type inference based on the value itself. For example, if the value is "true", it can be automatically inferred to be a boolean type. If the value is "3.2", it can be automatically inferred as a float type. Without type information, the server has to do type inference, which may cost some time.
+
+* Insert a Record, which contains multiple measurement values of a device at a timestamp
+
+```python
+session.insert_str_record(device_id, timestamp, measurements, string_values)
+```
+
+#### Insert of Aligned Timeseries
+
+The Insert of aligned timeseries uses interfaces like insert_aligned_XXX, and others are similar to the above interfaces:
+
+* insert_aligned_record
+* insert_aligned_records
+* insert_aligned_records_of_one_device
+* insert_aligned_tablet
+* insert_aligned_tablets
+
+
+### IoTDB-SQL Interface
+
+* Execute query statement
+
+```python
+session.execute_query_statement(sql)
+```
+
+* Execute non query statement
+
+```python
+session.execute_non_query_statement(sql)
+```
+
+
+### Pandas Support
To easily transform a query result to a [Pandas Dataframe](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html)
the SessionDataSet has a method `.todf()` which consumes the dataset and transforms it to a pandas dataframe.
@@ -100,13 +282,12 @@ the SessionDataSet has a method `.todf()` which consumes the dataset and transfo
Example:
```python
-
from iotdb.Session import Session
ip = "127.0.0.1"
port_ = "6667"
-username_ = 'root'
-password_ = 'root'
+username_ = "root"
+password_ = "root"
session = Session(ip, port_, username_, password_)
session.open(False)
result = session.execute_query_statement("SELECT * FROM root.*")
@@ -120,29 +301,52 @@ session.close()
df = ...
```
+
+### IoTDB Testcontainer
+
+The Test Support is based on the lib `testcontainers` (https://testcontainers-python.readthedocs.io/en/latest/index.html) which you need to install in your project if you want to use the feature.
+
+To start (and stop) an IoTDB Database in a Docker container simply do:
+```python
+class MyTestCase(unittest.TestCase):
+
+ def test_something(self):
+ with IoTDBContainer() as c:
+ session = Session("localhost", c.get_exposed_port(6667), "root", "root")
+ session.open(False)
+ result = session.execute_query_statement("SHOW TIMESERIES")
+ print(result)
+ session.close()
+```
+
+by default it will load the image `apache/iotdb:latest`, if you want a specific version just pass it like e.g. `IoTDBContainer("apache/iotdb:0.12.0")` to get version `0.12.0` running.
+
+
## Developers
### Introduction
-This is an example of how to connect to IoTDB with python, using the thrift rpc interfaces. Things
-are almost the same on Windows or Linux, but pay attention to the difference like path separator.
+This is an example of how to connect to IoTDB with python, using the thrift rpc interfaces. Things are almost the same on Windows or Linux, but pay attention to the difference like path separator.
+
+
### Prerequisites
-python3.7 or later is preferred.
+Python3.7 or later is preferred.
-You have to install Thrift (0.11.0 or later) to compile our thrift file into python code. Below is the official
-tutorial of installation, eventually, you should have a thrift executable.
+You have to install Thrift (0.11.0 or later) to compile our thrift file into python code. Below is the official tutorial of installation, eventually, you should have a thrift executable.
```
http://thrift.apache.org/docs/install/
```
Before starting you need to install `requirements_dev.txt` in your python environment, e.g. by calling
-```
+```shell
pip install -r requirements_dev.txt
```
+
+
### Compile the thrift library and Debug
In the root of IoTDB's source code folder, run `mvn clean generate-sources -pl client-py -am`.
@@ -153,10 +357,11 @@ This folder is ignored from git and should **never be pushed to git!**
**Notice** Do not upload `iotdb/thrift` to the git repo.
+
+
### Session Client & Example
-We packed up the Thrift interface in `client-py/src/iotdb/Session.py` (similar with its Java counterpart), also provided
-an example file `client-py/src/SessionExample.py` of how to use the session module. please read it carefully.
+We packed up the Thrift interface in `client-py/src/iotdb/Session.py` (similar with its Java counterpart), also provided an example file `client-py/src/SessionExample.py` of how to use the session module. please read it carefully.
Or, another simple example:
@@ -166,26 +371,33 @@ from iotdb.Session import Session
ip = "127.0.0.1"
port_ = "6667"
-username_ = 'root'
-password_ = 'root'
+username_ = "root"
+password_ = "root"
session = Session(ip, port_, username_, password_)
session.open(False)
zone = session.get_time_zone()
session.close()
```
+
+
### Tests
Please add your custom tests in `tests` folder.
+
To run all defined tests just type `pytest .` in the root folder.
**Notice** Some tests need docker to be started on your system as a test instance is started in a docker container using [testcontainers](https://testcontainers-python.readthedocs.io/en/latest/index.html).
+
+
### Futher Tools
[black](https://pypi.org/project/black/) and [flake8](https://pypi.org/project/flake8/) are installed for autoformatting and linting.
Both can be run by `black .` or `flake8 .` respectively.
+
+
## Releasing
To do a release just ensure that you have the right set of generated thrift files.
@@ -193,10 +405,14 @@ Then run linting and auto-formatting.
Then, ensure that all tests work (via `pytest .`).
Then you are good to go to do a release!
+
+
### Preparing your environment
First, install all necessary dev dependencies via `pip install -r requirements_dev.txt`.
+
+
### Doing the Release
There is a convenient script `release.sh` to do all steps for a release.
@@ -208,3 +424,5 @@ Namely, these are
* Run Tests via pytest
* Build
* Release to pypi
+
+
diff --git a/client-py/SessionAlignedTimeseriesExample.py b/client-py/SessionAlignedTimeseriesExample.py
index a54b1694224b6..0787fe91c34ad 100644
--- a/client-py/SessionAlignedTimeseriesExample.py
+++ b/client-py/SessionAlignedTimeseriesExample.py
@@ -52,7 +52,11 @@
encoding_lst_ = [TSEncoding.PLAIN for _ in range(len(data_type_lst_))]
compressor_lst_ = [Compressor.SNAPPY for _ in range(len(data_type_lst_))]
session.create_aligned_time_series(
- "root.sg_test_01.d_02", measurements_lst_, data_type_lst_, encoding_lst_, compressor_lst_
+ "root.sg_test_01.d_02",
+ measurements_lst_,
+ data_type_lst_,
+ encoding_lst_,
+ compressor_lst_,
)
# setting more aligned time series once.
@@ -75,7 +79,11 @@
encoding_lst_ = [TSEncoding.PLAIN for _ in range(len(data_type_lst_))]
compressor_lst_ = [Compressor.SNAPPY for _ in range(len(data_type_lst_))]
session.create_aligned_time_series(
- "root.sg_test_01.d_02", measurements_lst_, data_type_lst_, encoding_lst_, compressor_lst_
+ "root.sg_test_01.d_02",
+ measurements_lst_,
+ data_type_lst_,
+ encoding_lst_,
+ compressor_lst_,
)
# delete time series
@@ -108,7 +116,9 @@
TSDataType.DOUBLE,
TSDataType.TEXT,
]
-session.insert_aligned_record("root.sg_test_01.d_02", 1, measurements_, data_types_, values_)
+session.insert_aligned_record(
+ "root.sg_test_01.d_02", 1, measurements_, data_types_, values_
+)
# insert multiple aligned records into database
measurements_list_ = [
diff --git a/client-py/SessionAlignedTimeseriesTest.py b/client-py/SessionAlignedTimeseriesTest.py
deleted file mode 100644
index fbd36dff5fbd2..0000000000000
--- a/client-py/SessionAlignedTimeseriesTest.py
+++ /dev/null
@@ -1,280 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Uncomment the following line to use apache-iotdb module installed by pip3
-from iotdb.Session import Session
-from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor
-from iotdb.utils.Tablet import Tablet
-
-# whether the test has passed
-final_flag = True
-failed_count = 0
-
-
-def test_fail():
- global failed_count
- global final_flag
- final_flag = False
- failed_count += 1
-
-
-def print_message(message):
- print("*********")
- print(message)
- print("*********")
-
-
-# creating session connection.
-ip = "127.0.0.1"
-port_ = "6667"
-username_ = "root"
-password_ = "root"
-session = Session(ip, port_, username_, password_, fetch_size=1024, zone_id="UTC+8")
-session.open(False)
-
-if not session.is_open():
- print("can't open session")
- exit(1)
-
-# set and delete storage groups
-session.set_storage_group("root.sg_test_01")
-session.set_storage_group("root.sg_test_02")
-session.set_storage_group("root.sg_test_03")
-session.set_storage_group("root.sg_test_04")
-
-if session.delete_storage_group("root.sg_test_02") < 0:
- test_fail()
- print_message("delete storage group failed")
-
-if session.delete_storage_groups(["root.sg_test_03", "root.sg_test_04"]) < 0:
- test_fail()
- print_message("delete storage groups failed")
-
-# setting aligned time series.
-measurements_lst_ = [
- "s_01",
- "s_02",
- "s_03",
-]
-data_type_lst_ = [
- TSDataType.BOOLEAN,
- TSDataType.INT32,
- TSDataType.INT64,
-]
-encoding_lst_ = [TSEncoding.PLAIN for _ in range(len(data_type_lst_))]
-compressor_lst_ = [Compressor.SNAPPY for _ in range(len(data_type_lst_))]
-session.create_aligned_time_series(
- "root.sg_test_01.d_02", measurements_lst_, data_type_lst_, encoding_lst_, compressor_lst_
-)
-
-# setting more aligned time series once.
-measurements_lst_ = [
- "s_04",
- "s_05",
- "s_06",
- "s_07",
- "s_08",
- "s_09",
-]
-data_type_lst_ = [
- TSDataType.FLOAT,
- TSDataType.DOUBLE,
- TSDataType.TEXT,
- TSDataType.FLOAT,
- TSDataType.DOUBLE,
- TSDataType.TEXT,
-]
-encoding_lst_ = [TSEncoding.PLAIN for _ in range(len(data_type_lst_))]
-compressor_lst_ = [Compressor.SNAPPY for _ in range(len(data_type_lst_))]
-session.create_aligned_time_series(
- "root.sg_test_01.d_02", measurements_lst_, data_type_lst_, encoding_lst_, compressor_lst_
-)
-
-# delete time series
-if (
- session.delete_time_series(
- [
- "root.sg_test_01.d_02.s_07",
- "root.sg_test_01.d_02.s_08",
- "root.sg_test_01.d_02.s_09",
- ]
- )
- < 0
-):
- test_fail()
- print_message("delete time series failed")
-
-# checking time series
-# s_07 expecting False
-if session.check_time_series_exists("root.sg_test_01.d_02.s_07"):
- test_fail()
- print_message("root.sg_test_01.d_02.s_07 shouldn't exist")
-
-# s_03 expecting True
-if not session.check_time_series_exists("root.sg_test_01.d_02.s_03"):
- test_fail()
- print_message("root.sg_test_01.d_02.s_03 should exist")
-
-# insert one record into the database.
-measurements_ = ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"]
-values_ = [False, 10, 11, 1.1, 10011.1, "test_record"]
-data_types_ = [
- TSDataType.BOOLEAN,
- TSDataType.INT32,
- TSDataType.INT64,
- TSDataType.FLOAT,
- TSDataType.DOUBLE,
- TSDataType.TEXT,
-]
-if (
- session.insert_aligned_record(
- "root.sg_test_01.d_02", 1, measurements_, data_types_, values_
- )
- < 0
-):
- test_fail()
- print_message("insert record failed")
-
-# insert multiple records into database
-measurements_list_ = [
- ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"],
- ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"],
-]
-values_list_ = [
- [False, 22, 33, 4.4, 55.1, "test_records01"],
- [True, 77, 88, 1.25, 8.125, "test_records02"],
-]
-data_type_list_ = [data_types_, data_types_]
-device_ids_ = ["root.sg_test_01.d_02", "root.sg_test_01.d_02"]
-if (
- session.insert_aligned_records(
- device_ids_, [2, 3], measurements_list_, data_type_list_, values_list_
- )
- < 0
-):
- test_fail()
- print_message("insert records failed")
-
-# insert one tablet into the database.
-values_ = [
- [False, 10, 11, 1.1, 10011.1, "test01"],
- [True, 100, 11111, 1.25, 101.0, "test02"],
- [False, 100, 1, 188.1, 688.25, "test03"],
- [True, 0, 0, 0, 6.25, "test04"],
-] # Non-ASCII text will cause error since bytes can only hold 0-128 nums.
-timestamps_ = [4, 5, 6, 7]
-tablet_ = Tablet(
- "root.sg_test_01.d_02", measurements_, data_types_, values_, timestamps_
-)
-if session.insert_aligned_tablet(tablet_) < 0:
- test_fail()
- print_message("insert tablet failed")
-
-# insert multiple tablets into database
-tablet_01 = Tablet(
- "root.sg_test_01.d_02", measurements_, data_types_, values_, [8, 9, 10, 11]
-)
-tablet_02 = Tablet(
- "root.sg_test_01.d_02", measurements_, data_types_, values_, [12, 13, 14, 15]
-)
-if session.insert_aligned_tablets([tablet_01, tablet_02]) < 0:
- test_fail()
- print_message("insert tablets failed")
-
-# insert one tablet with empty cells into the database.
-values_ = [
- [None, 10, 11, 1.1, 10011.1, "test01"],
- [True, None, 11111, 1.25, 101.0, "test02"],
- [False, 100, 1, None, 688.25, "test03"],
- [True, 0, 0, 0, None, None],
-] # Non-ASCII text will cause error since bytes can only hold 0-128 nums.
-timestamps_ = [20, 21, 22, 23]
-tablet_ = Tablet(
- "root.sg_test_01.d_02", measurements_, data_types_, values_, timestamps_
-)
-if session.insert_aligned_tablet(tablet_) < 0:
- test_fail()
- print_message("insert tablet with empty cells failed")
-
-# insert records of one device
-time_list = [1, 2, 3]
-measurements_list = [
- ["s_01", "s_02", "s_03"],
- ["s_01", "s_02", "s_03"],
- ["s_01", "s_02", "s_03"],
-]
-data_types_list = [
- [TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],
- [TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],
- [TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],
-]
-values_list = [[False, 22, 33], [True, 1, 23], [False, 15, 26]]
-
-if (
- session.insert_aligned_records_of_one_device(
- "root.sg_test_01.d_02",
- time_list,
- measurements_list,
- data_types_list,
- values_list,
- )
- < 0
-):
- test_fail()
- print_message("insert records of one device failed")
-
-# execute non-query sql statement
-if (
- session.execute_non_query_statement(
- "insert into root.sg_test_01.d_02(timestamp, s_02) aligned values(16, 188)"
- )
- < 0
-):
- test_fail()
- print_message(
- "execute 'insert into root.sg_test_01.d_02(timestamp, s_02) aligned values(16, 188)' failed"
- )
-
-# execute sql query statement
-session_data_set = session.execute_query_statement("select * from root.sg_test_01.d_02")
-session_data_set.set_fetch_size(1024)
-expect_count = 20
-actual_count = 0
-while session_data_set.has_next():
- print(session_data_set.next())
- actual_count += 1
-session_data_set.close_operation_handle()
-
-if actual_count != expect_count:
- test_fail()
- print_message(
- "query count mismatch: expect count: "
- + str(expect_count)
- + " actual count: "
- + str(actual_count)
- )
-
-# close session connection.
-session.close()
-
-if final_flag:
- print("All executions done!!")
-else:
- print("Some test failed, please have a check")
- print("failed count: ", failed_count)
- exit(1)
diff --git a/client-py/SessionExample.py b/client-py/SessionExample.py
index 21a1702b5d602..bbc96695275f9 100644
--- a/client-py/SessionExample.py
+++ b/client-py/SessionExample.py
@@ -58,7 +58,7 @@
None,
{"tag1": "v1"},
{"description": "v1"},
- "temperature"
+ "temperature",
)
# setting multiple time series once.
@@ -105,7 +105,14 @@
tags_lst_ = [{"tag2": "v2"} for _ in range(len(data_type_lst_))]
attributes_lst_ = [{"description": "v2"} for _ in range(len(data_type_lst_))]
session.create_multi_time_series(
- ts_path_lst_, data_type_lst_, encoding_lst_, compressor_lst_, None, tags_lst_, attributes_lst_, None
+ ts_path_lst_,
+ data_type_lst_,
+ encoding_lst_,
+ compressor_lst_,
+ None,
+ tags_lst_,
+ attributes_lst_,
+ None,
)
# delete time series
@@ -178,19 +185,41 @@
# insert one numpy tablet into the database.
np_values_ = [
- np.array([False, True, False, True], np.dtype('>?')),
- np.array([10, 100, 100, 0], np.dtype('>i4')),
- np.array([11, 11111, 1, 0], np.dtype('>i8')),
- np.array([1.1, 1.25, 188.1, 0], np.dtype('>f4')),
- np.array([10011.1, 101.0, 688.25, 6.25], np.dtype('>f8')),
- ["test01", "test02", "test03", "test04"],
+ np.array([False, True, False, True], TSDataType.BOOLEAN.np_dtype()),
+ np.array([10, 100, 100, 0], TSDataType.INT32.np_dtype()),
+ np.array([11, 11111, 1, 0], TSDataType.INT64.np_dtype()),
+ np.array([1.1, 1.25, 188.1, 0], TSDataType.FLOAT.np_dtype()),
+ np.array([10011.1, 101.0, 688.25, 6.25], TSDataType.DOUBLE.np_dtype()),
+ np.array(["test01", "test02", "test03", "test04"], TSDataType.TEXT.np_dtype()),
]
-np_timestamps_ = np.array([1, 2, 3, 4], np.dtype('>i8'))
+np_timestamps_ = np.array([1, 2, 3, 4], TSDataType.INT64.np_dtype())
np_tablet_ = NumpyTablet(
"root.sg_test_01.d_02", measurements_, data_types_, np_values_, np_timestamps_
)
session.insert_tablet(np_tablet_)
+# insert one unsorted numpy tablet into the database.
+np_values_unsorted = [
+ np.array([False, False, False, True, True], np.dtype(">?")),
+ np.array([0, 10, 100, 1000, 10000], np.dtype(">i4")),
+ np.array([1, 11, 111, 1111, 11111], np.dtype(">i8")),
+ np.array([1.1, 1.25, 188.1, 0, 8.999], np.dtype(">f4")),
+ np.array([10011.1, 101.0, 688.25, 6.25, 8, 776], np.dtype(">f8")),
+ np.array(["test09", "test08", "test07", "test06", "test05"]),
+]
+np_timestamps_unsorted = np.array([9, 8, 7, 6, 5], np.dtype(">i8"))
+np_tablet_unsorted = NumpyTablet(
+ "root.sg_test_01.d_02",
+ measurements_,
+ data_types_,
+ np_values_unsorted,
+ np_timestamps_unsorted,
+)
+session.insert_tablet(np_tablet_unsorted)
+print(np_tablet_unsorted.get_timestamps())
+for value in np_tablet_unsorted.get_values():
+ print(value)
+
# insert multiple tablets into database
tablet_01 = Tablet(
"root.sg_test_01.d_01", measurements_, data_types_, values_, [8, 9, 10, 11]
@@ -251,6 +280,9 @@
while session_data_set.has_next():
print(session_data_set.next())
+# delete storage group
+session.delete_storage_group("root.sg_test_01")
+
# close session connection.
session.close()
diff --git a/client-py/SessionTest.py b/client-py/SessionTest.py
deleted file mode 100644
index 5435df3090ba3..0000000000000
--- a/client-py/SessionTest.py
+++ /dev/null
@@ -1,336 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Uncomment the following line to use apache-iotdb module installed by pip3
-import numpy as np
-
-from iotdb.Session import Session
-from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor
-from iotdb.utils.NumpyTablet import NumpyTablet
-from iotdb.utils.Tablet import Tablet
-
-# whether the test has passed
-final_flag = True
-failed_count = 0
-
-
-def test_fail():
- global failed_count
- global final_flag
- final_flag = False
- failed_count += 1
-
-
-def print_message(message):
- print("*********")
- print(message)
- print("*********")
-
-
-# creating session connection.
-ip = "127.0.0.1"
-port_ = "6667"
-username_ = "root"
-password_ = "root"
-session = Session(ip, port_, username_, password_, fetch_size=1024, zone_id="UTC+8")
-session.open(False)
-
-if not session.is_open():
- print("can't open session")
- exit(1)
-
-# set and delete storage groups
-session.set_storage_group("root.sg_test_01")
-session.set_storage_group("root.sg_test_02")
-session.set_storage_group("root.sg_test_03")
-session.set_storage_group("root.sg_test_04")
-
-if session.delete_storage_group("root.sg_test_02") < 0:
- test_fail()
- print_message("delete storage group failed")
-
-if session.delete_storage_groups(["root.sg_test_03", "root.sg_test_04"]) < 0:
- test_fail()
- print_message("delete storage groups failed")
-
-# setting time series.
-session.create_time_series(
- "root.sg_test_01.d_01.s_01", TSDataType.BOOLEAN, TSEncoding.PLAIN, Compressor.SNAPPY
-)
-session.create_time_series(
- "root.sg_test_01.d_01.s_02", TSDataType.INT32, TSEncoding.PLAIN, Compressor.SNAPPY
-)
-session.create_time_series(
- "root.sg_test_01.d_01.s_03", TSDataType.INT64, TSEncoding.PLAIN, Compressor.SNAPPY
-)
-session.create_time_series(
- "root.sg_test_01.d_02.s_01",
- TSDataType.BOOLEAN,
- TSEncoding.PLAIN,
- Compressor.SNAPPY,
- None,
- {"tag1": "v1"},
- {"description": "v1"},
- "temperature"
-)
-
-# setting multiple time series once.
-ts_path_lst_ = [
- "root.sg_test_01.d_01.s_04",
- "root.sg_test_01.d_01.s_05",
- "root.sg_test_01.d_01.s_06",
- "root.sg_test_01.d_01.s_07",
- "root.sg_test_01.d_01.s_08",
- "root.sg_test_01.d_01.s_09",
-]
-data_type_lst_ = [
- TSDataType.FLOAT,
- TSDataType.DOUBLE,
- TSDataType.TEXT,
- TSDataType.FLOAT,
- TSDataType.DOUBLE,
- TSDataType.TEXT,
-]
-encoding_lst_ = [TSEncoding.PLAIN for _ in range(len(data_type_lst_))]
-compressor_lst_ = [Compressor.SNAPPY for _ in range(len(data_type_lst_))]
-session.create_multi_time_series(
- ts_path_lst_, data_type_lst_, encoding_lst_, compressor_lst_
-)
-ts_path_lst_ = [
- "root.sg_test_01.d_02.s_04",
- "root.sg_test_01.d_02.s_05",
- "root.sg_test_01.d_02.s_06",
- "root.sg_test_01.d_02.s_07",
- "root.sg_test_01.d_02.s_08",
- "root.sg_test_01.d_02.s_09",
-]
-data_type_lst_ = [
- TSDataType.FLOAT,
- TSDataType.DOUBLE,
- TSDataType.TEXT,
- TSDataType.FLOAT,
- TSDataType.DOUBLE,
- TSDataType.TEXT,
-]
-encoding_lst_ = [TSEncoding.PLAIN for _ in range(len(data_type_lst_))]
-compressor_lst_ = [Compressor.SNAPPY for _ in range(len(data_type_lst_))]
-tags_lst_ = [{"tag2": "v2"} for _ in range(len(data_type_lst_))]
-attributes_lst_ = [{"description": "v2"} for _ in range(len(data_type_lst_))]
-session.create_multi_time_series(
- ts_path_lst_, data_type_lst_, encoding_lst_, compressor_lst_, None, tags_lst_, attributes_lst_, None
-)
-
-# delete time series
-if (
- session.delete_time_series(
- [
- "root.sg_test_01.d_01.s_07",
- "root.sg_test_01.d_01.s_08",
- "root.sg_test_01.d_01.s_09",
- ]
- )
- < 0
-):
- test_fail()
- print_message("delete time series failed")
-
-# checking time series
-# s_07 expecting False
-if session.check_time_series_exists("root.sg_test_01.d_01.s_07"):
- test_fail()
- print_message("root.sg_test_01.d_01.s_07 shouldn't exist")
-
-# s_03 expecting True
-if not session.check_time_series_exists("root.sg_test_01.d_01.s_03"):
- test_fail()
- print_message("root.sg_test_01.d_01.s_03 should exist")
-# d_02.s_01 expecting True
-if not session.check_time_series_exists("root.sg_test_01.d_02.s_01"):
- test_fail()
- print_message("root.sg_test_01.d_02.s_01 should exist")
-# d_02.s_06 expecting True
-if not session.check_time_series_exists("root.sg_test_01.d_02.s_06"):
- test_fail()
- print_message("root.sg_test_01.d_02.s_06 should exist")
-
-# insert one record into the database.
-measurements_ = ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"]
-values_ = [False, 10, 11, 1.1, 10011.1, "test_record"]
-data_types_ = [
- TSDataType.BOOLEAN,
- TSDataType.INT32,
- TSDataType.INT64,
- TSDataType.FLOAT,
- TSDataType.DOUBLE,
- TSDataType.TEXT,
-]
-if (
- session.insert_record(
- "root.sg_test_01.d_01", 1, measurements_, data_types_, values_
- )
- < 0
-):
- test_fail()
- print_message("insert record failed")
-
-# insert multiple records into database
-measurements_list_ = [
- ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"],
- ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"],
-]
-values_list_ = [
- [False, 22, 33, 4.4, 55.1, "test_records01"],
- [True, 77, 88, 1.25, 8.125, "test_records02"],
-]
-data_type_list_ = [data_types_, data_types_]
-device_ids_ = ["root.sg_test_01.d_01", "root.sg_test_01.d_01"]
-if (
- session.insert_records(
- device_ids_, [2, 3], measurements_list_, data_type_list_, values_list_
- )
- < 0
-):
- test_fail()
- print_message("insert records failed")
-
-# insert one tablet into the database.
-values_ = [
- [False, 10, 11, 1.1, 10011.1, "test01"],
- [True, 100, 11111, 1.25, 101.0, "test02"],
- [False, 100, 1, 188.1, 688.25, "test03"],
- [True, 0, 0, 0, 6.25, "test04"],
-] # Non-ASCII text will cause error since bytes can only hold 0-128 nums.
-timestamps_ = [4, 5, 6, 7]
-tablet_ = Tablet(
- "root.sg_test_01.d_01", measurements_, data_types_, values_, timestamps_
-)
-
-if session.insert_tablet(tablet_) < 0:
- test_fail()
- print_message("insert tablet failed")
-
-# insert one numpy tablet into the database.
-np_values_ = [
- np.array([False, True, False, True], np.dtype('>?')),
- np.array([10, 100, 100, 0], np.dtype('>i4')),
- np.array([11, 11111, 1, 0], np.dtype('>i8')),
- np.array([1.1, 1.25, 188.1, 0], np.dtype('>f4')),
- np.array([10011.1, 101.0, 688.25, 6.25], np.dtype('>f8')),
- ["test01", "test02", "test03", "test04"],
-]
-np_timestamps_ = np.array([1, 2, 3, 4], np.dtype('>i8'))
-np_tablet_ = NumpyTablet(
- "root.sg_test_01.d_02", measurements_, data_types_, np_values_, np_timestamps_
-)
-if session.insert_tablet(np_tablet_) < 0:
- test_fail()
- print_message("insert numpy tablet failed")
-
-# insert multiple tablets into database
-tablet_01 = Tablet(
- "root.sg_test_01.d_01", measurements_, data_types_, values_, [8, 9, 10, 11]
-)
-tablet_02 = Tablet(
- "root.sg_test_01.d_01", measurements_, data_types_, values_, [12, 13, 14, 15]
-)
-if session.insert_tablets([tablet_01, tablet_02]) < 0:
- test_fail()
- print_message("insert tablets failed")
-
-# insert one tablet with empty cells into the database.
-values_ = [
- [None, 10, 11, 1.1, 10011.1, "test01"],
- [True, None, 11111, 1.25, 101.0, "test02"],
- [False, 100, 1, None, 688.25, "test03"],
- [True, 0, 0, 0, None, None],
-] # Non-ASCII text will cause error since bytes can only hold 0-128 nums.
-timestamps_ = [20, 21, 22, 23]
-tablet_ = Tablet(
- "root.sg_test_01.d_01", measurements_, data_types_, values_, timestamps_
-)
-if session.insert_tablet(tablet_) < 0:
- test_fail()
- print_message("insert tablet with empty cells failed")
-
-# insert records of one device
-time_list = [1, 2, 3]
-measurements_list = [
- ["s_01", "s_02", "s_03"],
- ["s_01", "s_02", "s_03"],
- ["s_01", "s_02", "s_03"],
-]
-data_types_list = [
- [TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],
- [TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],
- [TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],
-]
-values_list = [[False, 22, 33], [True, 1, 23], [False, 15, 26]]
-
-if (
- session.insert_records_of_one_device(
- "root.sg_test_01.d_01",
- time_list,
- measurements_list,
- data_types_list,
- values_list,
- )
- < 0
-):
- test_fail()
- print_message("insert records of one device failed")
-
-# execute non-query sql statement
-if (
- session.execute_non_query_statement(
- "insert into root.sg_test_01.d_01(timestamp, s_02) values(16, 188)"
- )
- < 0
-):
- test_fail()
- print_message(
- "execute 'insert into root.sg_test_01.d_01(timestamp, s_02) values(16, 188)' failed"
- )
-
-# execute sql query statement
-session_data_set = session.execute_query_statement("select * from root.sg_test_01.d_01")
-session_data_set.set_fetch_size(1024)
-expect_count = 20
-actual_count = 0
-while session_data_set.has_next():
- print(session_data_set.next())
- actual_count += 1
-session_data_set.close_operation_handle()
-
-if actual_count != expect_count:
- test_fail()
- print_message(
- "query count mismatch: expect count: "
- + str(expect_count)
- + " actual count: "
- + str(actual_count)
- )
-
-# close session connection.
-session.close()
-
-if final_flag:
- print("All executions done!!")
-else:
- print("Some test failed, please have a check")
- print("failed count: ", failed_count)
- exit(1)
diff --git a/client-py/iotdb/Session.py b/client-py/iotdb/Session.py
index 567fed48d66d3..88af9e8343e92 100644
--- a/client-py/iotdb/Session.py
+++ b/client-py/iotdb/Session.py
@@ -110,7 +110,7 @@ def open(self, enable_rpc_compression):
username=self.__user,
password=self.__password,
zoneId=self.__zone_id,
- configuration={"version": "V_0_13"}
+ configuration={"version": "V_0_13"},
)
try:
@@ -193,8 +193,17 @@ def delete_storage_groups(self, storage_group_lst):
return Session.verify_success(status)
- def create_time_series(self, ts_path, data_type, encoding, compressor,
- props=None, tags=None, attributes=None, alias=None):
+ def create_time_series(
+ self,
+ ts_path,
+ data_type,
+ encoding,
+ compressor,
+ props=None,
+ tags=None,
+ attributes=None,
+ alias=None,
+ ):
"""
create single time series
:param ts_path: String, complete time series path (starts from root)
@@ -210,7 +219,15 @@ def create_time_series(self, ts_path, data_type, encoding, compressor,
encoding = encoding.value
compressor = compressor.value
request = TSCreateTimeseriesReq(
- self.__session_id, ts_path, data_type, encoding, compressor, props, tags, attributes, alias
+ self.__session_id,
+ ts_path,
+ data_type,
+ encoding,
+ compressor,
+ props,
+ tags,
+ attributes,
+ alias,
)
status = self.__client.createTimeseries(request)
logger.debug(
@@ -220,7 +237,7 @@ def create_time_series(self, ts_path, data_type, encoding, compressor,
return Session.verify_success(status)
def create_aligned_time_series(
- self, device_id, measurements_lst, data_type_lst, encoding_lst, compressor_lst
+ self, device_id, measurements_lst, data_type_lst, encoding_lst, compressor_lst
):
"""
create aligned time series
@@ -235,7 +252,12 @@ def create_aligned_time_series(
compressor_lst = [compressor.value for compressor in compressor_lst]
request = TSCreateAlignedTimeseriesReq(
- self.__session_id, device_id, measurements_lst, data_type_lst, encoding_lst, compressor_lst
+ self.__session_id,
+ device_id,
+ measurements_lst,
+ data_type_lst,
+ encoding_lst,
+ compressor_lst,
)
status = self.__client.createAlignedTimeseries(request)
logger.debug(
@@ -247,8 +269,15 @@ def create_aligned_time_series(
return Session.verify_success(status)
def create_multi_time_series(
- self, ts_path_lst, data_type_lst, encoding_lst, compressor_lst,
- props_lst=None, tags_lst=None, attributes_lst=None, alias_lst=None
+ self,
+ ts_path_lst,
+ data_type_lst,
+ encoding_lst,
+ compressor_lst,
+ props_lst=None,
+ tags_lst=None,
+ attributes_lst=None,
+ alias_lst=None,
):
"""
create multiple time series
@@ -266,8 +295,15 @@ def create_multi_time_series(
compressor_lst = [compressor.value for compressor in compressor_lst]
request = TSCreateMultiTimeseriesReq(
- self.__session_id, ts_path_lst, data_type_lst, encoding_lst, compressor_lst, props_lst, tags_lst,
- attributes_lst, alias_lst
+ self.__session_id,
+ ts_path_lst,
+ data_type_lst,
+ encoding_lst,
+ compressor_lst,
+ props_lst,
+ tags_lst,
+ attributes_lst,
+ alias_lst,
)
status = self.__client.createMultiTimeseries(request)
logger.debug(
@@ -319,7 +355,7 @@ def delete_data(self, paths_list, timestamp):
logger.exception("data deletion fails because: ", e)
def insert_str_record(self, device_id, timestamp, measurements, string_values):
- """ special case for inserting one row of String (TEXT) value """
+ """special case for inserting one row of String (TEXT) value"""
if type(string_values) == str:
string_values = [string_values]
if type(measurements) == str:
@@ -337,8 +373,10 @@ def insert_str_record(self, device_id, timestamp, measurements, string_values):
return Session.verify_success(status)
- def insert_aligned_str_record(self, device_id, timestamp, measurements, string_values):
- """ special case for inserting one row of String (TEXT) value """
+ def insert_aligned_str_record(
+ self, device_id, timestamp, measurements, string_values
+ ):
+ """special case for inserting one row of String (TEXT) value"""
if type(string_values) == str:
string_values = [string_values]
if type(measurements) == str:
@@ -409,7 +447,9 @@ def insert_records(
return Session.verify_success(status)
- def insert_aligned_record(self, device_id, timestamp, measurements, data_types, values):
+ def insert_aligned_record(
+ self, device_id, timestamp, measurements, data_types, values
+ ):
"""
insert one row of aligned record into database, if you want improve your performance, please use insertTablet method
for example a record at time=10086 with three measurements is:
@@ -522,7 +562,12 @@ def gen_insert_record_req(
)
values_in_bytes = Session.value_to_bytes(data_types, values)
return TSInsertRecordReq(
- self.__session_id, device_id, measurements, values_in_bytes, timestamp, is_aligned
+ self.__session_id,
+ device_id,
+ measurements,
+ values_in_bytes,
+ timestamp,
+ is_aligned,
)
def gen_insert_str_record_req(
@@ -537,7 +582,13 @@ def gen_insert_str_record_req(
)
def gen_insert_records_req(
- self, device_ids, times, measurements_lst, types_lst, values_lst, is_aligned=False
+ self,
+ device_ids,
+ times,
+ measurements_lst,
+ types_lst,
+ values_lst,
+ is_aligned=False,
):
if (
(len(device_ids) != len(measurements_lst))
@@ -561,7 +612,12 @@ def gen_insert_records_req(
value_lst.append(values_in_bytes)
return TSInsertRecordsReq(
- self.__session_id, device_ids, measurements_lst, value_lst, times, is_aligned
+ self.__session_id,
+ device_ids,
+ measurements_lst,
+ value_lst,
+ times,
+ is_aligned,
)
def insert_tablet(self, tablet):
@@ -621,7 +677,9 @@ def insert_aligned_tablets(self, tablet_lst):
insert multiple aligned tablets, tablets are independent to each other
:param tablet_lst: List of tablets
"""
- status = self.__client.insertTablets(self.gen_insert_tablets_req(tablet_lst, True))
+ status = self.__client.insertTablets(
+ self.gen_insert_tablets_req(tablet_lst, True)
+ )
logger.debug("insert multiple tablets, message: {}".format(status.message))
return Session.verify_success(status)
@@ -742,7 +800,13 @@ def insert_aligned_records_of_one_device_sorted(
return Session.verify_success(status)
def gen_insert_records_of_one_device_request(
- self, device_id, times_list, measurements_list, values_list, types_list, is_aligned=False
+ self,
+ device_id,
+ times_list,
+ measurements_list,
+ values_list,
+ types_list,
+ is_aligned=False,
):
binary_value_list = []
for values, data_types, measurements in zip(
@@ -762,7 +826,7 @@ def gen_insert_records_of_one_device_request(
measurements_list,
binary_value_list,
times_list,
- is_aligned
+ is_aligned,
)
def test_insert_tablet(self, tablet):
@@ -956,6 +1020,4 @@ def verify_success(status):
"""
if status.code == Session.SUCCESS_CODE:
return 0
-
- logger.error("error status is", status)
return -1
diff --git a/client-py/iotdb/utils/IoTDBConstants.py b/client-py/iotdb/utils/IoTDBConstants.py
index 50344d148047b..b01cd5078324c 100644
--- a/client-py/iotdb/utils/IoTDBConstants.py
+++ b/client-py/iotdb/utils/IoTDBConstants.py
@@ -17,6 +17,7 @@
#
from enum import Enum, unique
+import numpy as np
@unique
@@ -36,6 +37,16 @@ def __eq__(self, other) -> bool:
def __hash__(self):
return self.value
+ def np_dtype(self):
+ return {
+ TSDataType.BOOLEAN: np.dtype(">?"),
+ TSDataType.FLOAT: np.dtype(">f4"),
+ TSDataType.DOUBLE: np.dtype(">f8"),
+ TSDataType.INT32: np.dtype(">i4"),
+ TSDataType.INT64: np.dtype(">i8"),
+ TSDataType.TEXT: np.dtype("str"),
+ }[self]
+
@unique
class TSEncoding(Enum):
diff --git a/client-py/iotdb/utils/IoTDBRpcDataSet.py b/client-py/iotdb/utils/IoTDBRpcDataSet.py
index 83468ad4292ce..9d4bfa33cbf9e 100644
--- a/client-py/iotdb/utils/IoTDBRpcDataSet.py
+++ b/client-py/iotdb/utils/IoTDBRpcDataSet.py
@@ -233,7 +233,7 @@ def resultset_to_pandas(self):
data_array.append(value)
j += 1
offset += length
- data_array = np.array(data_array, dtype=np.object)
+ data_array = np.array(data_array, dtype=object)
else:
raise RuntimeError("unsupported data type {}.".format(data_type))
if data_array.dtype.byteorder == ">":
diff --git a/client-py/iotdb/utils/NumpyTablet.py b/client-py/iotdb/utils/NumpyTablet.py
index 72a83fee20873..b81a172a40994 100644
--- a/client-py/iotdb/utils/NumpyTablet.py
+++ b/client-py/iotdb/utils/NumpyTablet.py
@@ -17,15 +17,12 @@
#
import struct
-
from iotdb.utils.IoTDBConstants import TSDataType
from iotdb.utils.BitMap import BitMap
class NumpyTablet(object):
- def __init__(
- self, device_id, measurements, data_types, values, timestamps
- ):
+ def __init__(self, device_id, measurements, data_types, values, timestamps):
"""
creating a numpy tablet for insertion
for example, considering device: root.sg1.d1
@@ -41,6 +38,27 @@ def __init__(
:param values: List of numpy array, the values of each column should be the inner numpy array
:param timestamps: Numpy array, the timestamps
"""
+ if len(values) > 0 and len(values[0]) != len(timestamps):
+ raise RuntimeError(
+ "Input error! len(timestamps) does not equal to len(values[0])!"
+ )
+ if len(values) != len(data_types):
+ raise RuntimeError(
+ "Input error! len(values) does not equal to len(data_types)!"
+ )
+
+ if not self.check_sorted(timestamps):
+ index = timestamps.argsort()
+ timestamps = timestamps[index]
+ for i in range(len(values)):
+ values[i] = values[i][index]
+
+ if timestamps.dtype != TSDataType.INT64.np_dtype():
+ timestamps = timestamps.astype(TSDataType.INT64.np_dtype())
+ for i in range(len(values)):
+ if values[i].dtype != data_types[i].np_dtype():
+ values[i] = values[i].astype(data_types[i].np_dtype())
+
self.__values = values
self.__timestamps = timestamps
self.__device_id = device_id
@@ -68,6 +86,12 @@ def get_row_number(self):
def get_device_id(self):
return self.__device_id
+ def get_timestamps(self):
+ return self.__timestamps
+
+ def get_values(self):
+ return self.__values
+
def get_binary_timestamps(self):
return self.__timestamps.tobytes()
diff --git a/client-py/iotdb/utils/Tablet.py b/client-py/iotdb/utils/Tablet.py
index bf64118905d8f..58fc84603ac1a 100644
--- a/client-py/iotdb/utils/Tablet.py
+++ b/client-py/iotdb/utils/Tablet.py
@@ -23,9 +23,7 @@
class Tablet(object):
- def __init__(
- self, device_id, measurements, data_types, values, timestamps
- ):
+ def __init__(self, device_id, measurements, data_types, values, timestamps):
"""
creating a tablet for insertion
for example, considering device: root.sg1.d1
@@ -96,7 +94,7 @@ def get_binary_values(self):
has_none = False
for i in range(self.__column_number):
bitmap = None
- bitmaps.insert(i, bitmap)
+ bitmaps.append(bitmap)
if self.__data_types[i] == TSDataType.BOOLEAN:
format_str_list.append(str(self.__row_number))
format_str_list.append("?")
@@ -105,7 +103,7 @@ def get_binary_values(self):
values_tobe_packed.append(self.__values[j][i])
else:
values_tobe_packed.append(False)
- self.__mark_none_value(bitmaps, bitmap, i, j)
+ self.__mark_none_value(bitmaps, i, j)
has_none = True
elif self.__data_types[i] == TSDataType.INT32:
@@ -116,7 +114,7 @@ def get_binary_values(self):
values_tobe_packed.append(self.__values[j][i])
else:
values_tobe_packed.append(0)
- self.__mark_none_value(bitmaps, bitmap, i, j)
+ self.__mark_none_value(bitmaps, i, j)
has_none = True
elif self.__data_types[i] == TSDataType.INT64:
@@ -127,7 +125,7 @@ def get_binary_values(self):
values_tobe_packed.append(self.__values[j][i])
else:
values_tobe_packed.append(0)
- self.__mark_none_value(bitmaps, bitmap, i, j)
+ self.__mark_none_value(bitmaps, i, j)
has_none = True
elif self.__data_types[i] == TSDataType.FLOAT:
@@ -138,7 +136,7 @@ def get_binary_values(self):
values_tobe_packed.append(self.__values[j][i])
else:
values_tobe_packed.append(0)
- self.__mark_none_value(bitmaps, bitmap, i, j)
+ self.__mark_none_value(bitmaps, i, j)
has_none = True
elif self.__data_types[i] == TSDataType.DOUBLE:
@@ -149,7 +147,7 @@ def get_binary_values(self):
values_tobe_packed.append(self.__values[j][i])
else:
values_tobe_packed.append(0)
- self.__mark_none_value(bitmaps, bitmap, i, j)
+ self.__mark_none_value(bitmaps, i, j)
has_none = True
elif self.__data_types[i] == TSDataType.TEXT:
@@ -168,13 +166,11 @@ def get_binary_values(self):
format_str_list.append("s")
values_tobe_packed.append(len(value_bytes))
values_tobe_packed.append(value_bytes)
- self.__mark_none_value(bitmaps, bitmap, i, j)
+ self.__mark_none_value(bitmaps, i, j)
has_none = True
else:
- raise RuntimeError(
- "Unsupported data type:" + str(self.__data_types[i])
- )
+ raise RuntimeError("Unsupported data type:" + str(self.__data_types[i]))
if has_none:
for i in range(self.__column_number):
@@ -190,8 +186,7 @@ def get_binary_values(self):
format_str = "".join(format_str_list)
return struct.pack(format_str, *values_tobe_packed)
- def __mark_none_value(self, bitmaps, bitmap, column, row):
- if bitmap is None:
- bitmap = BitMap(self.__row_number)
- bitmaps.insert(column, bitmap)
- bitmap.mark(row)
+ def __mark_none_value(self, bitmaps, column, row):
+ if bitmaps[column] is None:
+ bitmaps[column] = BitMap(self.__row_number)
+ bitmaps[column].mark(row)
diff --git a/client-py/pom.xml b/client-py/pom.xml
index c2ca20b6d8c08..89853f7896677 100644
--- a/client-py/pom.xml
+++ b/client-py/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmliotdb-python-api
diff --git a/client-py/requirements_dev.txt b/client-py/requirements_dev.txt
index b7ad33c3ad62a..824831a319ee9 100644
--- a/client-py/requirements_dev.txt
+++ b/client-py/requirements_dev.txt
@@ -19,7 +19,7 @@
-r requirements.txt
# Pytest to run tests
pytest==6.2.2
-thrift==0.13.0
+thrift==0.14.1
flake8==3.9.0
black==20.8b1
# For releases
diff --git a/client-py/tests/tablet_performance_comparison.py b/client-py/tests/tablet_performance_comparison.py
index 76d26f8f41824..3626e818a85d0 100644
--- a/client-py/tests/tablet_performance_comparison.py
+++ b/client-py/tests/tablet_performance_comparison.py
@@ -75,9 +75,9 @@ def generate_data(_type: TSDataType):
if _type == TSDataType.BOOLEAN:
return [random.randint(0, 1) == 1 for _ in range(_row)]
elif _type == TSDataType.INT32:
- return [random.randint(-(2 ** 31), 2 ** 31) for _ in range(_row)]
+ return [random.randint(-(2**31), 2**31) for _ in range(_row)]
elif _type == TSDataType.INT64:
- return [random.randint(-(2 ** 63), 2 ** 63) for _ in range(_row)]
+ return [random.randint(-(2**63), 2**63) for _ in range(_row)]
elif _type == TSDataType.FLOAT:
return [1.5 for _ in range(_row)]
elif _type == TSDataType.DOUBLE:
@@ -208,9 +208,7 @@ def performance_test(
for m in measurements:
value_array.append(csv_data.at[t, m])
values.append(value_array)
- tablet = Tablet(
- device_id, measurements, data_types, values, timestamps_
- )
+ tablet = Tablet(device_id, measurements, data_types, values, timestamps_)
else:
# Use the NEW method to construct numpy tablet
timestamps_ = csv_data[TIME_STR].values
diff --git a/client-py/tests/test_aligned_timeseries.py b/client-py/tests/test_aligned_timeseries.py
new file mode 100644
index 0000000000000..ef57af5f59f67
--- /dev/null
+++ b/client-py/tests/test_aligned_timeseries.py
@@ -0,0 +1,294 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Uncomment the following line to use apache-iotdb module installed by pip3
+from iotdb.Session import Session
+from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor
+from iotdb.utils.Tablet import Tablet
+from iotdb.IoTDBContainer import IoTDBContainer
+
+# whether the test has passed
+final_flag = True
+failed_count = 0
+
+
+def test_fail():
+ global failed_count
+ global final_flag
+ final_flag = False
+ failed_count += 1
+
+
+def print_message(message):
+ print("*********")
+ print(message)
+ print("*********")
+
+
+def test_aligned_timeseries():
+ with IoTDBContainer("iotdb:dev") as db:
+ db: IoTDBContainer
+ session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
+ session.open(False)
+
+ if not session.is_open():
+ print("can't open session")
+ exit(1)
+
+ # set and delete storage groups
+ session.set_storage_group("root.sg_test_01")
+ session.set_storage_group("root.sg_test_02")
+ session.set_storage_group("root.sg_test_03")
+ session.set_storage_group("root.sg_test_04")
+
+ if session.delete_storage_group("root.sg_test_02") < 0:
+ test_fail()
+ print_message("delete storage group failed")
+
+ if session.delete_storage_groups(["root.sg_test_03", "root.sg_test_04"]) < 0:
+ test_fail()
+ print_message("delete storage groups failed")
+
+ # setting aligned time series.
+ measurements_lst_ = [
+ "s_01",
+ "s_02",
+ "s_03",
+ ]
+ data_type_lst_ = [
+ TSDataType.BOOLEAN,
+ TSDataType.INT32,
+ TSDataType.INT64,
+ ]
+ encoding_lst_ = [TSEncoding.PLAIN for _ in range(len(data_type_lst_))]
+ compressor_lst_ = [Compressor.SNAPPY for _ in range(len(data_type_lst_))]
+ session.create_aligned_time_series(
+ "root.sg_test_01.d_02",
+ measurements_lst_,
+ data_type_lst_,
+ encoding_lst_,
+ compressor_lst_,
+ )
+
+ # setting more aligned time series once.
+ measurements_lst_ = [
+ "s_04",
+ "s_05",
+ "s_06",
+ "s_07",
+ "s_08",
+ "s_09",
+ ]
+ data_type_lst_ = [
+ TSDataType.FLOAT,
+ TSDataType.DOUBLE,
+ TSDataType.TEXT,
+ TSDataType.FLOAT,
+ TSDataType.DOUBLE,
+ TSDataType.TEXT,
+ ]
+ encoding_lst_ = [TSEncoding.PLAIN for _ in range(len(data_type_lst_))]
+ compressor_lst_ = [Compressor.SNAPPY for _ in range(len(data_type_lst_))]
+ session.create_aligned_time_series(
+ "root.sg_test_01.d_02",
+ measurements_lst_,
+ data_type_lst_,
+ encoding_lst_,
+ compressor_lst_,
+ )
+
+ # delete time series
+ if (
+ session.delete_time_series(
+ [
+ "root.sg_test_01.d_02.s_07",
+ "root.sg_test_01.d_02.s_08",
+ "root.sg_test_01.d_02.s_09",
+ ]
+ )
+ < 0
+ ):
+ test_fail()
+ print_message("delete time series failed")
+
+ # checking time series
+ # s_07 expecting False
+ if session.check_time_series_exists("root.sg_test_01.d_02.s_07"):
+ test_fail()
+ print_message("root.sg_test_01.d_02.s_07 shouldn't exist")
+
+ # s_03 expecting True
+ if not session.check_time_series_exists("root.sg_test_01.d_02.s_03"):
+ test_fail()
+ print_message("root.sg_test_01.d_02.s_03 should exist")
+
+ # insert one record into the database.
+ measurements_ = ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"]
+ values_ = [False, 10, 11, 1.1, 10011.1, "test_record"]
+ data_types_ = [
+ TSDataType.BOOLEAN,
+ TSDataType.INT32,
+ TSDataType.INT64,
+ TSDataType.FLOAT,
+ TSDataType.DOUBLE,
+ TSDataType.TEXT,
+ ]
+ if (
+ session.insert_aligned_record(
+ "root.sg_test_01.d_02", 1, measurements_, data_types_, values_
+ )
+ < 0
+ ):
+ test_fail()
+ print_message("insert record failed")
+
+ # insert multiple records into database
+ measurements_list_ = [
+ ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"],
+ ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"],
+ ]
+ values_list_ = [
+ [False, 22, 33, 4.4, 55.1, "test_records01"],
+ [True, 77, 88, 1.25, 8.125, "test_records02"],
+ ]
+ data_type_list_ = [data_types_, data_types_]
+ device_ids_ = ["root.sg_test_01.d_02", "root.sg_test_01.d_02"]
+ if (
+ session.insert_aligned_records(
+ device_ids_, [2, 3], measurements_list_, data_type_list_, values_list_
+ )
+ < 0
+ ):
+ test_fail()
+ print_message("insert records failed")
+
+ # insert one tablet into the database.
+ values_ = [
+ [False, 10, 11, 1.1, 10011.1, "test01"],
+ [True, 100, 11111, 1.25, 101.0, "test02"],
+ [False, 100, 1, 188.1, 688.25, "test03"],
+ [True, 0, 0, 0, 6.25, "test04"],
+ ] # Non-ASCII text will cause error since bytes can only hold 0-128 nums.
+ timestamps_ = [4, 5, 6, 7]
+ tablet_ = Tablet(
+ "root.sg_test_01.d_02", measurements_, data_types_, values_, timestamps_
+ )
+ if session.insert_aligned_tablet(tablet_) < 0:
+ test_fail()
+ print_message("insert tablet failed")
+
+ # insert multiple tablets into database
+ tablet_01 = Tablet(
+ "root.sg_test_01.d_02", measurements_, data_types_, values_, [8, 9, 10, 11]
+ )
+ tablet_02 = Tablet(
+ "root.sg_test_01.d_02",
+ measurements_,
+ data_types_,
+ values_,
+ [12, 13, 14, 15],
+ )
+ if session.insert_aligned_tablets([tablet_01, tablet_02]) < 0:
+ test_fail()
+ print_message("insert tablets failed")
+
+ # insert one tablet with empty cells into the database.
+ values_ = [
+ [None, 10, 11, 1.1, 10011.1, "test01"],
+ [True, None, 11111, 1.25, 101.0, "test02"],
+ [False, 100, 1, None, 688.25, "test03"],
+ [True, 0, 0, 0, None, None],
+ ] # Non-ASCII text will cause error since bytes can only hold 0-128 nums.
+ timestamps_ = [20, 21, 22, 23]
+ tablet_ = Tablet(
+ "root.sg_test_01.d_02", measurements_, data_types_, values_, timestamps_
+ )
+ if session.insert_aligned_tablet(tablet_) < 0:
+ test_fail()
+ print_message("insert tablet with empty cells failed")
+
+ # insert records of one device
+ time_list = [1, 2, 3]
+ measurements_list = [
+ ["s_01", "s_02", "s_03"],
+ ["s_01", "s_02", "s_03"],
+ ["s_01", "s_02", "s_03"],
+ ]
+ data_types_list = [
+ [TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],
+ [TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],
+ [TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],
+ ]
+ values_list = [[False, 22, 33], [True, 1, 23], [False, 15, 26]]
+
+ if (
+ session.insert_aligned_records_of_one_device(
+ "root.sg_test_01.d_02",
+ time_list,
+ measurements_list,
+ data_types_list,
+ values_list,
+ )
+ < 0
+ ):
+ test_fail()
+ print_message("insert records of one device failed")
+
+ # execute non-query sql statement
+ if (
+ session.execute_non_query_statement(
+ "insert into root.sg_test_01.d_02(timestamp, s_02) aligned values(16, 188)"
+ )
+ < 0
+ ):
+ test_fail()
+ print_message(
+ "execute 'insert into root.sg_test_01.d_02(timestamp, s_02) aligned values(16, 188)' failed"
+ )
+
+ # execute sql query statement
+ session_data_set = session.execute_query_statement(
+ "select * from root.sg_test_01.d_02"
+ )
+ session_data_set.set_fetch_size(1024)
+ expect_count = 20
+ actual_count = 0
+ while session_data_set.has_next():
+ print(session_data_set.next())
+ actual_count += 1
+ session_data_set.close_operation_handle()
+
+ if actual_count != expect_count:
+ test_fail()
+ print_message(
+ "query count mismatch: expect count: "
+ + str(expect_count)
+ + " actual count: "
+ + str(actual_count)
+ )
+
+ # close session connection.
+ session.close()
+
+
+if final_flag:
+ print("All executions done!!")
+else:
+ print("Some test failed, please have a check")
+ print("failed count: ", failed_count)
+ exit(1)
diff --git a/client-py/tests/test_dataframe.py b/client-py/tests/test_dataframe.py
index 4cfa57640aa6e..9a341e1e485d5 100644
--- a/client-py/tests/test_dataframe.py
+++ b/client-py/tests/test_dataframe.py
@@ -23,16 +23,17 @@
def test_simple_query():
- with IoTDBContainer("apache/iotdb:0.11.2") as db:
+ with IoTDBContainer("iotdb:dev") as db:
db: IoTDBContainer
session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
session.open(False)
+ session.execute_non_query_statement("set storage group to root.device")
# Write data
session.insert_str_record("root.device", 123, "pressure", "15.0")
# Read
- session_data_set = session.execute_query_statement("SELECT * FROM root.*")
+ session_data_set = session.execute_query_statement("SELECT ** FROM root")
df = session_data_set.todf()
session.close()
@@ -42,11 +43,11 @@ def test_simple_query():
def test_non_time_query():
- with IoTDBContainer("apache/iotdb:0.11.2") as db:
+ with IoTDBContainer("iotdb:dev") as db:
db: IoTDBContainer
session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
session.open(False)
-
+ session.execute_non_query_statement("set storage group to root.device")
# Write data
session.insert_str_record("root.device", 123, "pressure", "15.0")
@@ -68,8 +69,7 @@ def test_non_time_query():
]
assert_array_equal(
df.values,
- [
- [
+ [[
"root.device.pressure",
None,
"root.device",
diff --git a/client-py/tests/test_numpy_tablet.py b/client-py/tests/test_numpy_tablet.py
new file mode 100644
index 0000000000000..b984193975724
--- /dev/null
+++ b/client-py/tests/test_numpy_tablet.py
@@ -0,0 +1,147 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import numpy as np
+from iotdb.utils.IoTDBConstants import TSDataType
+from iotdb.utils.NumpyTablet import NumpyTablet
+from iotdb.utils.Tablet import Tablet
+
+
+def test_numpy_tablet_serialization():
+
+ measurements_ = ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"]
+ data_types_ = [
+ TSDataType.BOOLEAN,
+ TSDataType.INT32,
+ TSDataType.INT64,
+ TSDataType.FLOAT,
+ TSDataType.DOUBLE,
+ TSDataType.TEXT,
+ ]
+ values_ = [
+ [False, 10, 11, 1.1, 10011.1, "test01"],
+ [True, 100, 11111, 1.25, 101.0, "test02"],
+ [False, 100, 1, 188.1, 688.25, "test03"],
+ [True, 0, 0, 0, 6.25, "test04"],
+ ]
+ timestamps_ = [16, 17, 18, 19]
+ tablet_ = Tablet(
+ "root.sg_test_01.d_01", measurements_, data_types_, values_, timestamps_
+ )
+ np_values_ = [
+ np.array([False, True, False, True], np.dtype(">?")),
+ np.array([10, 100, 100, 0], np.dtype(">i4")),
+ np.array([11, 11111, 1, 0], np.dtype(">i8")),
+ np.array([1.1, 1.25, 188.1, 0], np.dtype(">f4")),
+ np.array([10011.1, 101.0, 688.25, 6.25], np.dtype(">f8")),
+ np.array(["test01", "test02", "test03", "test04"]),
+ ]
+ np_timestamps_ = np.array([16, 17, 18, 19], np.dtype(">i8"))
+ np_tablet_ = NumpyTablet(
+ "root.sg_test_01.d_01", measurements_, data_types_, np_values_, np_timestamps_
+ )
+ assert tablet_.get_binary_timestamps() == np_tablet_.get_binary_timestamps()
+ assert tablet_.get_binary_values() == np_tablet_.get_binary_values()
+
+
+def test_sort_numpy_tablet():
+
+ measurements_ = ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"]
+ data_types_ = [
+ TSDataType.BOOLEAN,
+ TSDataType.INT32,
+ TSDataType.INT64,
+ TSDataType.FLOAT,
+ TSDataType.DOUBLE,
+ TSDataType.TEXT,
+ ]
+ values_ = [
+ [True, 10000, 11111, 8.999, 776, "test05"],
+ [True, 1000, 1111, 0, 6.25, "test06"],
+ [False, 100, 111, 188.1, 688.25, "test07"],
+ [False, 10, 11, 1.25, 101.0, "test08"],
+ [False, 0, 1, 1.1, 10011.1, "test09"],
+ ]
+ timestamps_ = [5, 6, 7, 8, 9]
+ tablet_ = Tablet(
+ "root.sg_test_01.d_01", measurements_, data_types_, values_, timestamps_
+ )
+ np_values_unsorted = [
+ np.array([False, False, False, True, True], np.dtype(">?")),
+ np.array([0, 10, 100, 1000, 10000], np.dtype(">i4")),
+ np.array([1, 11, 111, 1111, 11111], np.dtype(">i8")),
+ np.array([1.1, 1.25, 188.1, 0, 8.999], np.dtype(">f4")),
+ np.array([10011.1, 101.0, 688.25, 6.25, 776], np.dtype(">f8")),
+ np.array(["test09", "test08", "test07", "test06", "test05"]),
+ ]
+ np_timestamps_unsorted = np.array([9, 8, 7, 6, 5], np.dtype(">i8"))
+ np_tablet_ = NumpyTablet(
+ "root.sg_test_01.d_01",
+ measurements_,
+ data_types_,
+ np_values_unsorted,
+ np_timestamps_unsorted,
+ )
+ assert tablet_.get_binary_timestamps() == np_tablet_.get_binary_timestamps()
+ assert tablet_.get_binary_values() == np_tablet_.get_binary_values()
+
+
+def test_numpy_tablet_auto_correct_datatype():
+
+ measurements_ = ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"]
+ data_types_ = [
+ TSDataType.BOOLEAN,
+ TSDataType.INT32,
+ TSDataType.INT64,
+ TSDataType.FLOAT,
+ TSDataType.DOUBLE,
+ TSDataType.TEXT,
+ ]
+ values_ = [
+ [True, 10000, 11111, 8.999, 776, "test05"],
+ [True, 1000, 1111, 0, 6.25, "test06"],
+ [False, 100, 111, 188.1, 688.25, "test07"],
+ [False, 10, 11, 1.25, 101.0, "test08"],
+ [False, 0, 1, 1.1, 10011.1, "test09"],
+ ]
+ timestamps_ = [5, 6, 7, 8, 9]
+ tablet_ = Tablet(
+ "root.sg_test_01.d_01", measurements_, data_types_, values_, timestamps_
+ )
+ np_values_unsorted = [
+ np.array([False, False, False, True, True]),
+ np.array([0, 10, 100, 1000, 10000]),
+ np.array([1, 11, 111, 1111, 11111]),
+ np.array([1.1, 1.25, 188.1, 0, 8.999]),
+ np.array([10011.1, 101.0, 688.25, 6.25, 776]),
+ np.array(["test09", "test08", "test07", "test06", "test05"]),
+ ]
+ np_timestamps_unsorted = np.array([9, 8, 7, 6, 5])
+ # numpy uses native byte order by default (little-endian on the CI platforms), not the big-endian dtypes above
+ assert np_timestamps_unsorted.dtype != np.dtype(">i8")
+ for i in range(1, 4):
+ assert np_values_unsorted[i].dtype != data_types_[i].np_dtype()
+ np_tablet_ = NumpyTablet(
+ "root.sg_test_01.d_01",
+ measurements_,
+ data_types_,
+ np_values_unsorted,
+ np_timestamps_unsorted,
+ )
+ assert tablet_.get_binary_timestamps() == np_tablet_.get_binary_timestamps()
+ assert tablet_.get_binary_values() == np_tablet_.get_binary_values()
diff --git a/client-py/tests/test_session.py b/client-py/tests/test_session.py
new file mode 100644
index 0000000000000..6ecd14205c758
--- /dev/null
+++ b/client-py/tests/test_session.py
@@ -0,0 +1,362 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# These tests run against a local "iotdb:dev" Docker image via IoTDBContainer.
+import numpy as np
+
+from iotdb.Session import Session
+from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor
+from iotdb.utils.NumpyTablet import NumpyTablet
+from iotdb.utils.Tablet import Tablet
+from iotdb.IoTDBContainer import IoTDBContainer
+
+# whether the test has passed
+final_flag = True
+failed_count = 0
+
+
+def test_fail():
+ global failed_count
+ global final_flag
+ final_flag = False
+ failed_count += 1
+
+
+def print_message(message):
+ print("*********")
+ print(message)
+ print("*********")
+
+
+def test_session():
+ with IoTDBContainer("iotdb:dev") as db:
+ db: IoTDBContainer
+ session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
+ session.open(False)
+
+ if not session.is_open():
+ print("can't open session")
+ exit(1)
+
+ # set and delete storage groups
+ session.set_storage_group("root.sg_test_01")
+ session.set_storage_group("root.sg_test_02")
+ session.set_storage_group("root.sg_test_03")
+ session.set_storage_group("root.sg_test_04")
+
+ if session.delete_storage_group("root.sg_test_02") < 0:
+ test_fail()
+ print_message("delete storage group failed")
+
+ if session.delete_storage_groups(["root.sg_test_03", "root.sg_test_04"]) < 0:
+ test_fail()
+ print_message("delete storage groups failed")
+
+ # setting time series.
+ session.create_time_series(
+ "root.sg_test_01.d_01.s_01",
+ TSDataType.BOOLEAN,
+ TSEncoding.PLAIN,
+ Compressor.SNAPPY,
+ )
+ session.create_time_series(
+ "root.sg_test_01.d_01.s_02",
+ TSDataType.INT32,
+ TSEncoding.PLAIN,
+ Compressor.SNAPPY,
+ )
+ session.create_time_series(
+ "root.sg_test_01.d_01.s_03",
+ TSDataType.INT64,
+ TSEncoding.PLAIN,
+ Compressor.SNAPPY,
+ )
+ session.create_time_series(
+ "root.sg_test_01.d_02.s_01",
+ TSDataType.BOOLEAN,
+ TSEncoding.PLAIN,
+ Compressor.SNAPPY,
+ None,
+ {"tag1": "v1"},
+ {"description": "v1"},
+ "temperature",
+ )
+
+ # setting multiple time series once.
+ ts_path_lst_ = [
+ "root.sg_test_01.d_01.s_04",
+ "root.sg_test_01.d_01.s_05",
+ "root.sg_test_01.d_01.s_06",
+ "root.sg_test_01.d_01.s_07",
+ "root.sg_test_01.d_01.s_08",
+ "root.sg_test_01.d_01.s_09",
+ ]
+ data_type_lst_ = [
+ TSDataType.FLOAT,
+ TSDataType.DOUBLE,
+ TSDataType.TEXT,
+ TSDataType.FLOAT,
+ TSDataType.DOUBLE,
+ TSDataType.TEXT,
+ ]
+ encoding_lst_ = [TSEncoding.PLAIN for _ in range(len(data_type_lst_))]
+ compressor_lst_ = [Compressor.SNAPPY for _ in range(len(data_type_lst_))]
+ session.create_multi_time_series(
+ ts_path_lst_, data_type_lst_, encoding_lst_, compressor_lst_
+ )
+ ts_path_lst_ = [
+ "root.sg_test_01.d_02.s_04",
+ "root.sg_test_01.d_02.s_05",
+ "root.sg_test_01.d_02.s_06",
+ "root.sg_test_01.d_02.s_07",
+ "root.sg_test_01.d_02.s_08",
+ "root.sg_test_01.d_02.s_09",
+ ]
+ data_type_lst_ = [
+ TSDataType.FLOAT,
+ TSDataType.DOUBLE,
+ TSDataType.TEXT,
+ TSDataType.FLOAT,
+ TSDataType.DOUBLE,
+ TSDataType.TEXT,
+ ]
+ encoding_lst_ = [TSEncoding.PLAIN for _ in range(len(data_type_lst_))]
+ compressor_lst_ = [Compressor.SNAPPY for _ in range(len(data_type_lst_))]
+ tags_lst_ = [{"tag2": "v2"} for _ in range(len(data_type_lst_))]
+ attributes_lst_ = [{"description": "v2"} for _ in range(len(data_type_lst_))]
+ session.create_multi_time_series(
+ ts_path_lst_,
+ data_type_lst_,
+ encoding_lst_,
+ compressor_lst_,
+ None,
+ tags_lst_,
+ attributes_lst_,
+ None,
+ )
+
+ # delete time series
+ if (
+ session.delete_time_series(
+ [
+ "root.sg_test_01.d_01.s_07",
+ "root.sg_test_01.d_01.s_08",
+ "root.sg_test_01.d_01.s_09",
+ ]
+ )
+ < 0
+ ):
+ test_fail()
+ print_message("delete time series failed")
+
+ # checking time series
+ # s_07 expecting False
+ if session.check_time_series_exists("root.sg_test_01.d_01.s_07"):
+ test_fail()
+ print_message("root.sg_test_01.d_01.s_07 shouldn't exist")
+
+ # s_03 expecting True
+ if not session.check_time_series_exists("root.sg_test_01.d_01.s_03"):
+ test_fail()
+ print_message("root.sg_test_01.d_01.s_03 should exist")
+ # d_02.s_01 expecting True
+ if not session.check_time_series_exists("root.sg_test_01.d_02.s_01"):
+ test_fail()
+ print_message("root.sg_test_01.d_02.s_01 should exist")
+ # d_02.s_06 expecting True
+ if not session.check_time_series_exists("root.sg_test_01.d_02.s_06"):
+ test_fail()
+ print_message("root.sg_test_01.d_02.s_06 should exist")
+
+ # insert one record into the database.
+ measurements_ = ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"]
+ values_ = [False, 10, 11, 1.1, 10011.1, "test_record"]
+ data_types_ = [
+ TSDataType.BOOLEAN,
+ TSDataType.INT32,
+ TSDataType.INT64,
+ TSDataType.FLOAT,
+ TSDataType.DOUBLE,
+ TSDataType.TEXT,
+ ]
+ if (
+ session.insert_record(
+ "root.sg_test_01.d_01", 1, measurements_, data_types_, values_
+ )
+ < 0
+ ):
+ test_fail()
+ print_message("insert record failed")
+
+ # insert multiple records into database
+ measurements_list_ = [
+ ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"],
+ ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"],
+ ]
+ values_list_ = [
+ [False, 22, 33, 4.4, 55.1, "test_records01"],
+ [True, 77, 88, 1.25, 8.125, "test_records02"],
+ ]
+ data_type_list_ = [data_types_, data_types_]
+ device_ids_ = ["root.sg_test_01.d_01", "root.sg_test_01.d_01"]
+ if (
+ session.insert_records(
+ device_ids_, [2, 3], measurements_list_, data_type_list_, values_list_
+ )
+ < 0
+ ):
+ test_fail()
+ print_message("insert records failed")
+
+ # insert one tablet into the database.
+ values_ = [
+ [False, 10, 11, 1.1, 10011.1, "test01"],
+ [True, 100, 11111, 1.25, 101.0, "test02"],
+ [False, 100, 1, 188.1, 688.25, "test03"],
+ [True, 0, 0, 0, 6.25, "test04"],
+ ] # Non-ASCII text may cause an error since TEXT values are encoded as ASCII (code points 0-127).
+ timestamps_ = [4, 5, 6, 7]
+ tablet_ = Tablet(
+ "root.sg_test_01.d_01", measurements_, data_types_, values_, timestamps_
+ )
+
+ if session.insert_tablet(tablet_) < 0:
+ test_fail()
+ print_message("insert tablet failed")
+
+ # insert one numpy tablet into the database.
+ np_values_ = [
+ np.array([False, True, False, True], np.dtype(">?")),
+ np.array([10, 100, 100, 0], np.dtype(">i4")),
+ np.array([11, 11111, 1, 0], np.dtype(">i8")),
+ np.array([1.1, 1.25, 188.1, 0], np.dtype(">f4")),
+ np.array([10011.1, 101.0, 688.25, 6.25], np.dtype(">f8")),
+ np.array(["test01", "test02", "test03", "test04"]),
+ ]
+ np_timestamps_ = np.array([1, 2, 3, 4], np.dtype(">i8"))
+ np_tablet_ = NumpyTablet(
+ "root.sg_test_01.d_02",
+ measurements_,
+ data_types_,
+ np_values_,
+ np_timestamps_,
+ )
+ if session.insert_tablet(np_tablet_) < 0:
+ test_fail()
+ print_message("insert numpy tablet failed")
+
+ # insert multiple tablets into database
+ tablet_01 = Tablet(
+ "root.sg_test_01.d_01", measurements_, data_types_, values_, [8, 9, 10, 11]
+ )
+ tablet_02 = Tablet(
+ "root.sg_test_01.d_01",
+ measurements_,
+ data_types_,
+ values_,
+ [12, 13, 14, 15],
+ )
+ if session.insert_tablets([tablet_01, tablet_02]) < 0:
+ test_fail()
+ print_message("insert tablets failed")
+
+ # insert one tablet with empty cells into the database.
+ values_ = [
+ [None, 10, 11, 1.1, 10011.1, "test01"],
+ [True, None, 11111, 1.25, 101.0, "test02"],
+ [False, 100, 1, None, 688.25, "test03"],
+ [True, 0, 0, 0, None, None],
+ ] # Non-ASCII text may cause an error since TEXT values are encoded as ASCII (code points 0-127).
+ timestamps_ = [20, 21, 22, 23]
+ tablet_ = Tablet(
+ "root.sg_test_01.d_01", measurements_, data_types_, values_, timestamps_
+ )
+ if session.insert_tablet(tablet_) < 0:
+ test_fail()
+ print_message("insert tablet with empty cells failed")
+
+ # insert records of one device
+ time_list = [1, 2, 3]
+ measurements_list = [
+ ["s_01", "s_02", "s_03"],
+ ["s_01", "s_02", "s_03"],
+ ["s_01", "s_02", "s_03"],
+ ]
+ data_types_list = [
+ [TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],
+ [TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],
+ [TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],
+ ]
+ values_list = [[False, 22, 33], [True, 1, 23], [False, 15, 26]]
+
+ if (
+ session.insert_records_of_one_device(
+ "root.sg_test_01.d_01",
+ time_list,
+ measurements_list,
+ data_types_list,
+ values_list,
+ )
+ < 0
+ ):
+ test_fail()
+ print_message("insert records of one device failed")
+
+ # execute non-query sql statement
+ if (
+ session.execute_non_query_statement(
+ "insert into root.sg_test_01.d_01(timestamp, s_02) values(16, 188)"
+ )
+ < 0
+ ):
+ test_fail()
+ print_message(
+ "execute 'insert into root.sg_test_01.d_01(timestamp, s_02) values(16, 188)' failed"
+ )
+
+ # execute sql query statement
+ session_data_set = session.execute_query_statement(
+ "select * from root.sg_test_01.d_01"
+ )
+ session_data_set.set_fetch_size(1024)
+ expect_count = 20
+ actual_count = 0
+ while session_data_set.has_next():
+ print(session_data_set.next())
+ actual_count += 1
+ session_data_set.close_operation_handle()
+
+ if actual_count != expect_count:
+ test_fail()
+ print_message(
+ "query count mismatch: expect count: "
+ + str(expect_count)
+ + " actual count: "
+ + str(actual_count)
+ )
+
+ # close session connection.
+ session.close()
+
+
+if final_flag:
+ print("All executions done!!")
+else:
+ print("Some test failed, please have a check")
+ print("failed count: ", failed_count)
+ exit(1)
diff --git a/client-py/tests/test_tablet.py b/client-py/tests/test_tablet.py
new file mode 100644
index 0000000000000..3ad0fdcb0caba
--- /dev/null
+++ b/client-py/tests/test_tablet.py
@@ -0,0 +1,111 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import pandas as pd
+from pandas.testing import assert_frame_equal
+
+from iotdb.IoTDBContainer import IoTDBContainer
+from iotdb.Session import Session
+from iotdb.utils.IoTDBConstants import TSDataType
+from iotdb.utils.Tablet import Tablet
+
+
+def test_tablet_insertion():
+ with IoTDBContainer("iotdb:dev") as db:
+ db: IoTDBContainer
+ session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
+ session.open(False)
+
+ measurements_ = ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"]
+ data_types_ = [
+ TSDataType.BOOLEAN,
+ TSDataType.INT32,
+ TSDataType.INT64,
+ TSDataType.FLOAT,
+ TSDataType.DOUBLE,
+ TSDataType.TEXT,
+ ]
+ values_ = [
+ [False, 10, 11, 1.1, 10011.1, "test01"],
+ [True, 100, 11111, 1.25, 101.0, "test02"],
+ [False, 100, 1, 188.1, 688.25, "test03"],
+ [True, 0, 0, 0, 6.25, "test04"],
+ ]
+ timestamps_ = [16, 17, 18, 19]
+ tablet_ = Tablet(
+ "root.sg_test_01.d_01", measurements_, data_types_, values_, timestamps_
+ )
+ session.execute_non_query_statement("set storage group to root.sg_test_01")
+ session.insert_tablet(tablet_)
+ columns = []
+ for measurement in measurements_:
+ columns.append("root.sg_test_01.d_01." + measurement)
+ df_input = pd.DataFrame(values_, None, columns)
+ df_input.insert(0, "Time", timestamps_)
+
+ session_data_set = session.execute_query_statement(
+ "select s_01, s_02, s_03, s_04, s_05, s_06 from root.sg_test_01.d_01"
+ )
+ df_output = session_data_set.todf()
+ df_output = df_output[df_input.columns.tolist()]
+
+ session.close()
+ assert_frame_equal(df_input, df_output, False)
+
+
+def test_nullable_tablet_insertion():
+ with IoTDBContainer("iotdb:dev") as db:
+ db: IoTDBContainer
+ session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
+ session.open(False)
+
+ measurements_ = ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"]
+ data_types_ = [
+ TSDataType.BOOLEAN,
+ TSDataType.INT32,
+ TSDataType.INT64,
+ TSDataType.FLOAT,
+ TSDataType.DOUBLE,
+ TSDataType.TEXT,
+ ]
+ values_ = [
+ [None, None, 11, 1.1, 10011.1, "test01"],
+ [True, None, 11111, 1.25, 101.0, "test02"],
+ [False, 100, 1, None, 688.25, "test03"],
+ [True, None, 0, 0, 6.25, None],
+ ]
+ timestamps_ = [16, 17, 18, 19]
+ tablet_ = Tablet(
+ "root.sg_test_01.d_01", measurements_, data_types_, values_, timestamps_
+ )
+ session.execute_non_query_statement("set storage group to root.sg_test_01")
+ session.insert_tablet(tablet_)
+ columns = []
+ for measurement in measurements_:
+ columns.append("root.sg_test_01.d_01." + measurement)
+ df_input = pd.DataFrame(values_, None, columns)
+ df_input.insert(0, "Time", timestamps_)
+
+ session_data_set = session.execute_query_statement(
+ "select s_01, s_02, s_03, s_04, s_05, s_06 from root.sg_test_01.d_01"
+ )
+ df_output = session_data_set.todf()
+ df_output = df_output[df_input.columns.tolist()]
+
+ session.close()
+ assert_frame_equal(df_input, df_output, False)
diff --git a/client-py/tests/test_todf.py b/client-py/tests/test_todf.py
index 73f9fa4f8997d..8972f293c1e01 100644
--- a/client-py/tests/test_todf.py
+++ b/client-py/tests/test_todf.py
@@ -16,10 +16,9 @@
# under the License.
#
-import random
-
import numpy as np
import pandas as pd
+import random
from pandas.testing import assert_frame_equal
from iotdb.IoTDBContainer import IoTDBContainer
@@ -65,10 +64,11 @@ def create_ts(session):
def test_simple_query():
- with IoTDBContainer() as db:
+ with IoTDBContainer("iotdb:dev") as db:
db: IoTDBContainer
session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
session.open(False)
+ session.execute_non_query_statement("set storage group to root.wt1")
create_ts(session)
@@ -92,7 +92,7 @@ def test_simple_query():
df_input.insert(0, "Time", timestamps)
- session_data_set = session.execute_query_statement("SELECT * FROM root.*")
+ session_data_set = session.execute_query_statement("SELECT ** FROM root")
df_output = session_data_set.todf()
df_output = df_output[df_input.columns.tolist()]
@@ -101,11 +101,11 @@ def test_simple_query():
def test_with_null_query():
- with IoTDBContainer() as db:
+ with IoTDBContainer("iotdb:dev") as db:
db: IoTDBContainer
session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
session.open(False)
-
+ session.execute_non_query_statement("set storage group to root.wt1")
create_ts(session)
# insert data
@@ -121,7 +121,7 @@ def test_with_null_query():
)
data[ts_path_lst[5]] = np.random.choice(
["text1", "text2"], size=data_nums
- ).astype(np.object)
+ ).astype(object)
data_empty = {}
for ts_path in ts_path_lst:
@@ -133,7 +133,7 @@ def test_with_null_query():
tmp_array = pd.Series(tmp_array).astype("Int64")
elif data[ts_path].dtype == np.float32 or data[ts_path].dtype == np.double:
tmp_array = np.full(data_nums, np.nan, data[ts_path].dtype)
- elif data[ts_path].dtype == np.bool:
+ elif data[ts_path].dtype == bool:
tmp_array = np.full(data_nums, np.nan, np.float32)
tmp_array = pd.Series(tmp_array).astype("boolean")
else:
@@ -171,7 +171,7 @@ def test_with_null_query():
df_input.insert(0, "Time", timestamps)
- session_data_set = session.execute_query_statement("SELECT * FROM root.*")
+ session_data_set = session.execute_query_statement("SELECT ** FROM root")
df_output = session_data_set.todf()
df_output = df_output[df_input.columns.tolist()]
@@ -180,10 +180,11 @@ def test_with_null_query():
def test_multi_fetch():
- with IoTDBContainer() as db:
+ with IoTDBContainer("iotdb:dev") as db:
db: IoTDBContainer
session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
session.open(False)
+ session.execute_non_query_statement("set storage group to root.wt1")
create_ts(session)
@@ -207,7 +208,7 @@ def test_multi_fetch():
df_input.insert(0, "Time", timestamps)
- session_data_set = session.execute_query_statement("SELECT * FROM root.*")
+ session_data_set = session.execute_query_statement("SELECT ** FROM root")
session_data_set.set_fetch_size(100)
df_output = session_data_set.todf()
df_output = df_output[df_input.columns.tolist()]
diff --git a/cluster/pom.xml b/cluster/pom.xml
index 0e9a74ef598f3..2b53217604daf 100644
--- a/cluster/pom.xml
+++ b/cluster/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmliotdb-cluster
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/RemoteTsFileResource.java b/cluster/src/main/java/org/apache/iotdb/cluster/RemoteTsFileResource.java
index 04fb776a5d66a..ea5a224012f4e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/RemoteTsFileResource.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/RemoteTsFileResource.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.cluster.utils.NodeSerializeUtils;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+import org.apache.iotdb.db.engine.storagegroup.TsFileResourceStatus;
import org.apache.iotdb.db.utils.SerializeUtils;
import java.io.DataOutputStream;
@@ -46,14 +47,14 @@ public class RemoteTsFileResource extends TsFileResource {
private boolean isPlanRangeUnique = false;
public RemoteTsFileResource() {
- setClosed(true);
+ setStatus(TsFileResourceStatus.CLOSED);
this.timeIndex = IoTDBDescriptor.getInstance().getConfig().getTimeIndexLevel().getTimeIndex();
}
private RemoteTsFileResource(TsFileResource other) throws IOException {
super(other);
withModification = new File(getModFile().getFilePath()).exists();
- setClosed(true);
+ setStatus(TsFileResourceStatus.CLOSED);
}
public RemoteTsFileResource(TsFileResource other, Node source) throws IOException {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/PartitionedSnapshotLogManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/PartitionedSnapshotLogManager.java
index bc16376177475..9f670bd54e926 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/PartitionedSnapshotLogManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/PartitionedSnapshotLogManager.java
@@ -31,9 +31,9 @@
import org.apache.iotdb.cluster.server.member.DataGroupMember;
import org.apache.iotdb.db.metadata.path.PartialPath;
import org.apache.iotdb.db.service.IoTDB;
-import org.apache.iotdb.db.service.metrics.Metric;
import org.apache.iotdb.db.service.metrics.MetricsService;
-import org.apache.iotdb.db.service.metrics.Tag;
+import org.apache.iotdb.db.service.metrics.enums.Metric;
+import org.apache.iotdb.db.service.metrics.enums.Tag;
import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
import org.apache.iotdb.metrics.utils.MetricLevel;
import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/ElectionHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/ElectionHandler.java
index 9f16f3ff76056..6ad5a665a523a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/ElectionHandler.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/ElectionHandler.java
@@ -21,9 +21,9 @@
import org.apache.iotdb.cluster.rpc.thrift.Node;
import org.apache.iotdb.cluster.server.member.RaftMember;
-import org.apache.iotdb.db.service.metrics.Metric;
import org.apache.iotdb.db.service.metrics.MetricsService;
-import org.apache.iotdb.db.service.metrics.Tag;
+import org.apache.iotdb.db.service.metrics.enums.Metric;
+import org.apache.iotdb.db.service.metrics.enums.Tag;
import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
import org.apache.iotdb.metrics.utils.MetricLevel;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/ClusterMonitor.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/ClusterMonitor.java
index 18993df5ce15b..db046351911a6 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/ClusterMonitor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/ClusterMonitor.java
@@ -42,9 +42,9 @@
import org.apache.iotdb.db.service.IService;
import org.apache.iotdb.db.service.JMXService;
import org.apache.iotdb.db.service.ServiceType;
-import org.apache.iotdb.db.service.metrics.Metric;
import org.apache.iotdb.db.service.metrics.MetricsService;
-import org.apache.iotdb.db.service.metrics.Tag;
+import org.apache.iotdb.db.service.metrics.enums.Metric;
+import org.apache.iotdb.db.service.metrics.enums.Tag;
import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
import org.apache.iotdb.metrics.utils.MetricLevel;
import org.apache.iotdb.tsfile.utils.Pair;
diff --git a/code-coverage/pom.xml b/code-coverage/pom.xml
index 3428c41446d84..05b1f879c2820 100644
--- a/code-coverage/pom.xml
+++ b/code-coverage/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmliotdb-code-coverage
diff --git a/compile-tools/pom.xml b/compile-tools/pom.xml
index 02ecea0006ab2..be97d4793e1a8 100644
--- a/compile-tools/pom.xml
+++ b/compile-tools/pom.xml
@@ -22,7 +22,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlclient-cpp-tools
diff --git a/compile-tools/thrift/pom.xml b/compile-tools/thrift/pom.xml
index 45b9f32720946..d57afbae21248 100644
--- a/compile-tools/thrift/pom.xml
+++ b/compile-tools/thrift/pom.xml
@@ -22,7 +22,7 @@
org.apache.iotdbclient-cpp-tools
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlclient-cpp-tools-thrift
@@ -65,7 +65,7 @@
wget
- https://archive.apache.org/dist/thrift/${thrift.version}/thrift-${thrift.version}.tar.gz
+ http://archive.apache.org/dist/thrift/${thrift.version}/thrift-${thrift.version}.tar.gztrue${project.build.directory}
diff --git a/cross-tests/pom.xml b/cross-tests/pom.xml
index 408d6d04cf973..cbb61382df0b6 100644
--- a/cross-tests/pom.xml
+++ b/cross-tests/pom.xml
@@ -23,7 +23,7 @@
iotdb-parentorg.apache.iotdb
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT4.0.0cross-tests
diff --git a/cross-tests/src/test/java/org/apache/iotdb/cross/tests/tools/importCsv/ImportCsvTestIT.java b/cross-tests/src/test/java/org/apache/iotdb/cross/tests/tools/importCsv/ImportCsvTestIT.java
index 749ff8c949933..4dad730477b37 100644
--- a/cross-tests/src/test/java/org/apache/iotdb/cross/tests/tools/importCsv/ImportCsvTestIT.java
+++ b/cross-tests/src/test/java/org/apache/iotdb/cross/tests/tools/importCsv/ImportCsvTestIT.java
@@ -190,6 +190,37 @@ public void test() throws IOException, ClassNotFoundException {
}
}
+ /**
+ * test importing a CSV with the -aligned option and verify that the created devices are aligned
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testAligned() throws IOException, ClassNotFoundException {
+ assertTrue(generateTestCSV(false, false, false, false, false));
+ String[] params = {"-f", CSV_FILE, "-aligned", "true"};
+ testMethod(params, null);
+ File file = new File(CSV_FILE);
+ Class.forName(Config.JDBC_DRIVER_NAME);
+ try (Connection connection =
+ DriverManager.getConnection(
+ Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
+ Statement statement = connection.createStatement()) {
+ if (statement.execute("show devices")) {
+ ResultSet resultSet = statement.getResultSet();
+ while (resultSet.next()) {
+ assertTrue("true".equals(resultSet.getString(2)));
+ }
+ resultSet.close();
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ if (file.exists()) {
+ file.delete();
+ }
+ }
+
/**
* test the situation that the schema has not been created and CSV file has no problem
*
diff --git a/distribution/pom.xml b/distribution/pom.xml
index ca14e929e624a..de02e0c1d3054 100644
--- a/distribution/pom.xml
+++ b/distribution/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmliotdb-distribution
diff --git a/distribution/src/assembly/all.xml b/distribution/src/assembly/all.xml
index 098b0df9f3e76..dcd04e93ad9bc 100644
--- a/distribution/src/assembly/all.xml
+++ b/distribution/src/assembly/all.xml
@@ -55,6 +55,10 @@
conf${maven.multiModuleProjectDirectory}/metrics/interface/src/main/assembly/resources/conf
+
+ grafana-metrics-example
+ ${maven.multiModuleProjectDirectory}/grafana-metrics-example
+ sbin${maven.multiModuleProjectDirectory}/server/src/assembly/resources/sbin
diff --git a/distribution/src/assembly/server.xml b/distribution/src/assembly/server.xml
index 2946ebb637a78..adbeb5c90f7a7 100644
--- a/distribution/src/assembly/server.xml
+++ b/distribution/src/assembly/server.xml
@@ -46,6 +46,10 @@
conf${maven.multiModuleProjectDirectory}/metrics/interface/src/main/assembly/resources/conf
+
+ grafana-metrics-example
+ ${maven.multiModuleProjectDirectory}/grafana-metrics-example
+ sbin${maven.multiModuleProjectDirectory}/server/src/assembly/resources/sbin
diff --git a/docker/src/main/Dockerfile-0.13.0-cluster b/docker/src/main/Dockerfile-0.13.0-cluster
new file mode 100644
index 0000000000000..15600260f88b6
--- /dev/null
+++ b/docker/src/main/Dockerfile-0.13.0-cluster
@@ -0,0 +1,53 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+FROM openjdk:11-jre-slim
+RUN apt update \
+ # procps is for `free` command
+ && apt install wget unzip lsof procps -y \
+ && wget https://downloads.apache.org/iotdb/0.13.0/apache-iotdb-0.13.0-cluster-bin.zip \
+ # if you are in China, use the following URL
+ #&& wget https://mirrors.tuna.tsinghua.edu.cn/apache/iotdb/0.13.0/apache-iotdb-0.13.0-cluster-bin.zip \
+ && unzip apache-iotdb-0.13.0-cluster-bin.zip \
+ && rm apache-iotdb-0.13.0-cluster-bin.zip \
+ && mv apache-iotdb-0.13.0-cluster-bin /iotdb \
+ && apt remove wget unzip -y \
+ && apt autoremove -y \
+ && apt purge --auto-remove -y \
+ && apt clean -y \
+ # modify the seeds in configuration file
+ && sed -i '/^seed_nodes/cseed_nodes=127.0.0.1:9003' /iotdb/conf/iotdb-cluster.properties \
+ && sed -i '/^default_replica_num/cdefault_replica_num=1' /iotdb/conf/iotdb-cluster.properties
+
+# rpc port
+EXPOSE 6667
+# JMX port
+EXPOSE 31999
+# sync port
+EXPOSE 5555
+# monitor port
+EXPOSE 8181
+# internal meta port
+EXPOSE 9003
+# internal data port
+EXPOSE 40010
+VOLUME /iotdb/data
+VOLUME /iotdb/logs
+ENV PATH="/iotdb/sbin/:/iotdb/tools/:${PATH}"
+ENTRYPOINT ["/iotdb/sbin/start-node.sh"]
diff --git a/docker/src/main/Dockerfile-0.13.0-grafana-connector b/docker/src/main/Dockerfile-0.13.0-grafana-connector
new file mode 100644
index 0000000000000..719f63bca54cc
--- /dev/null
+++ b/docker/src/main/Dockerfile-0.13.0-grafana-connector
@@ -0,0 +1,41 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+FROM openjdk:11-jre-slim
+RUN apt update \
+ # procps is for `free` command
+ && apt install wget unzip lsof procps -y \
+ && wget https://downloads.apache.org/iotdb/0.13.0/apache-iotdb-0.13.0-grafana-connector-bin.zip \
+ # if you are in China, use the following URL
+ #&& wget https://mirrors.tuna.tsinghua.edu.cn/apache/iotdb/0.13.0/apache-iotdb-0.13.0-grafana-connector-bin.zip \
+ && unzip apache-iotdb-0.13.0-grafana-connector-bin.zip \
+ && rm apache-iotdb-0.13.0-grafana-connector-bin.zip \
+ && mv apache-iotdb-0.13.0-grafana-connector-bin /iotdb-grafana-connector \
+ && apt remove wget unzip -y \
+ && apt autoremove -y \
+ && apt purge --auto-remove -y \
+ && apt clean -y
+# rpc port
+EXPOSE 8888
+VOLUME /iotdb-grafana-connector/config
+RUN echo "#!/bin/bash" > /iotdb-grafana-connector/runboot.sh
+RUN echo "java -Djava.security.egd=file:/dev/./urandom -jar /iotdb-grafana-connector/iotdb-grafana-connector.war" >> /iotdb-grafana-connector/runboot.sh
+RUN chmod a+x /iotdb-grafana-connector/runboot.sh
+WORKDIR /iotdb-grafana-connector
+ENTRYPOINT ["./runboot.sh"]
diff --git a/docker/src/main/Dockerfile-0.13.0-node b/docker/src/main/Dockerfile-0.13.0-node
new file mode 100644
index 0000000000000..97ee772cd5063
--- /dev/null
+++ b/docker/src/main/Dockerfile-0.13.0-node
@@ -0,0 +1,45 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+FROM openjdk:11-jre-slim
+RUN apt update \
+ # procps is for `free` command
+ && apt install wget unzip lsof procps -y \
+ && wget https://downloads.apache.org/iotdb/0.13.0/apache-iotdb-0.13.0-server-bin.zip \
+ # if you are in China, use the following URL
+ #&& wget https://mirrors.tuna.tsinghua.edu.cn/apache/iotdb/0.13.0/apache-iotdb-0.13.0-server-bin.zip \
+ && unzip apache-iotdb-0.13.0-server-bin.zip \
+ && rm apache-iotdb-0.13.0-server-bin.zip \
+ && mv apache-iotdb-0.13.0-server-bin /iotdb \
+ && apt remove wget unzip -y \
+ && apt autoremove -y \
+ && apt purge --auto-remove -y \
+ && apt clean -y
+# rpc port
+EXPOSE 6667
+# JMX port
+EXPOSE 31999
+# sync port
+EXPOSE 5555
+# monitor port
+EXPOSE 8181
+VOLUME /iotdb/data
+VOLUME /iotdb/logs
+ENV PATH="/iotdb/sbin/:/iotdb/tools/:${PATH}"
+ENTRYPOINT ["/iotdb/sbin/start-server.sh"]
diff --git a/docs/UserGuide/API/Programming-Java-Native-API.md b/docs/UserGuide/API/Programming-Java-Native-API.md
index 90b4c1e4a0d5b..260ef3c76ab10 100644
--- a/docs/UserGuide/API/Programming-Java-Native-API.md
+++ b/docs/UserGuide/API/Programming-Java-Native-API.md
@@ -514,6 +514,7 @@ If you can not get a session connection in 60 seconds, there is a warning log bu
If a session has finished an operation, it will be put back to the pool automatically.
If a session connection is broken, the session will be removed automatically and the pool will try
to create a new session and redo the operation.
+You can also specify a URL list of multiple reachable nodes when creating a SessionPool, just as you would when creating a Session, to ensure high availability of clients in a distributed cluster.
For query operations:
diff --git a/docs/UserGuide/API/Programming-Python-Native-API.md b/docs/UserGuide/API/Programming-Python-Native-API.md
index 19299da7efeea..fc5e80c161ccc 100644
--- a/docs/UserGuide/API/Programming-Python-Native-API.md
+++ b/docs/UserGuide/API/Programming-Python-Native-API.md
@@ -29,7 +29,9 @@ You have to install thrift (>=0.13) before using the package.
### How to use (Example)
-First, download the package: `pip3 install apache-iotdb`
+First, download the latest package: `pip3 install apache-iotdb`
+
+*Notice: If you are installing Python API v0.13.0, DO NOT install by `pip install apache-iotdb==0.13.0`, use `pip install apache-iotdb==0.13.0.post1` instead!*
You can get an example of using the package to read and write data at here: [Example](https://github.com/apache/iotdb/blob/master/client-py/SessionExample.py)
@@ -44,8 +46,8 @@ from iotdb.Session import Session
ip = "127.0.0.1"
port_ = "6667"
-username_ = 'root'
-password_ = 'root'
+username_ = "root"
+password_ = "root"
session = Session(ip, port_, username_, password_)
session.open(False)
zone = session.get_time_zone()
@@ -96,20 +98,20 @@ session.delete_storage_groups(group_name_lst)
```python
session.create_time_series(ts_path, data_type, encoding, compressor,
- props=None, tags=None, attributes=None, alias=None)
+ props=None, tags=None, attributes=None, alias=None)
session.create_multi_time_series(
- ts_path_lst, data_type_lst, encoding_lst, compressor_lst,
- props_lst=None, tags_lst=None, attributes_lst=None, alias_lst=None
- )
+ ts_path_lst, data_type_lst, encoding_lst, compressor_lst,
+ props_lst=None, tags_lst=None, attributes_lst=None, alias_lst=None
+)
```
* Create aligned timeseries
```python
session.create_aligned_time_series(
- device_id, measurements_lst, data_type_lst, encoding_lst, compressor_lst
- )
+ device_id, measurements_lst, data_type_lst, encoding_lst, compressor_lst
+)
```
Attention: Alias of measurements are **not supported** currently.
@@ -128,7 +130,7 @@ session.check_time_series_exists(path)
### Data Manipulation Interface (DML Interface)
-##### Insert
+#### Insert
It is recommended to use insertTablet to help improve write efficiency.
@@ -156,13 +158,13 @@ session.insert_tablet(tablet_)
```
* Numpy Tablet
-Comparing with Tablet, Numpy Tablet is using [numpy ndarray](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) to record data.
+Comparing with Tablet, Numpy Tablet is using [numpy.ndarray](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) to record data.
With less memory footprint and time cost of serialization, the insert performance will be better.
**Notice**
1. time and numerical value columns in Tablet is ndarray
-2. ndarray should be big-endian, see the example below
-3. TEXT type cannot be ndarray
+2. recommended to use the specific dtypes to each ndarray, see the example below
+ (if not, the default dtypes are also ok).
```python
data_types_ = [
@@ -174,16 +176,16 @@ data_types_ = [
TSDataType.TEXT,
]
np_values_ = [
- np.array([False, True, False, True], np.dtype('>?')),
- np.array([10, 100, 100, 0], np.dtype('>i4')),
- np.array([11, 11111, 1, 0], np.dtype('>i8')),
- np.array([1.1, 1.25, 188.1, 0], np.dtype('>f4')),
- np.array([10011.1, 101.0, 688.25, 6.25], np.dtype('>f8')),
- ["test01", "test02", "test03", "test04"],
+ np.array([False, True, False, True], TSDataType.BOOLEAN.np_dtype()),
+ np.array([10, 100, 100, 0], TSDataType.INT32.np_dtype()),
+ np.array([11, 11111, 1, 0], TSDataType.INT64.np_dtype()),
+ np.array([1.1, 1.25, 188.1, 0], TSDataType.FLOAT.np_dtype()),
+ np.array([10011.1, 101.0, 688.25, 6.25], TSDataType.DOUBLE.np_dtype()),
+ np.array(["test01", "test02", "test03", "test04"], TSDataType.TEXT.np_dtype()),
]
-np_timestamps_ = np.array([1, 2, 3, 4], np.dtype('>i8'))
+np_timestamps_ = np.array([1, 2, 3, 4], TSDataType.INT64.np_dtype())
np_tablet_ = NumpyTablet(
- "root.sg_test_01.d_02", measurements_, data_types_, np_values_, np_timestamps_
+ "root.sg_test_01.d_02", measurements_, data_types_, np_values_, np_timestamps_
)
session.insert_tablet(np_tablet_)
```
@@ -205,7 +207,7 @@ session.insert_record(device_id, timestamp, measurements_, data_types_, values_)
```python
session.insert_records(
device_ids_, time_list_, measurements_list_, data_type_list_, values_list_
- )
+)
```
* Insert multiple Records that belong to the same device.
@@ -264,8 +266,8 @@ from iotdb.Session import Session
ip = "127.0.0.1"
port_ = "6667"
-username_ = 'root'
-password_ = 'root'
+username_ = "root"
+password_ = "root"
session = Session(ip, port_, username_, password_)
session.open(False)
result = session.execute_query_statement("SELECT * FROM root.*")
@@ -290,7 +292,7 @@ class MyTestCase(unittest.TestCase):
def test_something(self):
with IoTDBContainer() as c:
- session = Session('localhost', c.get_exposed_port(6667), 'root', 'root')
+ session = Session("localhost", c.get_exposed_port(6667), "root", "root")
session.open(False)
result = session.execute_query_statement("SHOW TIMESERIES")
print(result)
@@ -349,8 +351,8 @@ from iotdb.Session import Session
ip = "127.0.0.1"
port_ = "6667"
-username_ = 'root'
-password_ = 'root'
+username_ = "root"
+password_ = "root"
session = Session(ip, port_, username_, password_)
session.open(False)
zone = session.get_time_zone()
diff --git a/docs/UserGuide/Data-Concept/Data-Model-and-Terminology.md b/docs/UserGuide/Data-Concept/Data-Model-and-Terminology.md
index 4ef3fa88972e3..4b070a858ccb0 100644
--- a/docs/UserGuide/Data-Concept/Data-Model-and-Terminology.md
+++ b/docs/UserGuide/Data-Concept/Data-Model-and-Terminology.md
@@ -100,7 +100,7 @@ In order to make it easier and faster to express multiple timeseries paths, IoTD
`*` represents one layer. For example, `root.vehicle.*.sensor1` represents a 4-layer path which is prefixed with `root.vehicle` and suffixed with `sensor1`.
-`**` represents (`*`)+, which is one or more layers of `*`. For example, `root.vehicle.device1.*` represents all paths prefixed by `root.vehicle.device1` with layers greater than or equal to 4, like `root.vehicle.device1.*`, `root.vehicle.device1.*.*`, `root.vehicle.device1.*.*.*`, etc; `root.vehicle.**.sensor1` represents a path which is prefixed with `root.vehicle` and suffixed with `sensor1` and has at least 4 layers.
+`**` represents (`*`)+, which is one or more layers of `*`. For example, `root.vehicle.device1.**` represents all paths prefixed by `root.vehicle.device1` with layers greater than or equal to 4, like `root.vehicle.device1.*`, `root.vehicle.device1.*.*`, `root.vehicle.device1.*.*.*`, etc; `root.vehicle.**.sensor1` represents a path which is prefixed with `root.vehicle` and suffixed with `sensor1` and has at least 4 layers.
> Note1: Wildcard `*` and `**` cannot be placed at the beginning of the path.
diff --git a/docs/UserGuide/Ecosystem Integration/DBeaver.md b/docs/UserGuide/Ecosystem Integration/DBeaver.md
index db1839a65af9b..25159978d59e5 100644
--- a/docs/UserGuide/Ecosystem Integration/DBeaver.md
+++ b/docs/UserGuide/Ecosystem Integration/DBeaver.md
@@ -51,23 +51,25 @@ DBeaver is a SQL client software application and a database administration tool.

-5. Add libs listed below and click `Find Class`.
-
- Another way is after running `mvn clean package -pl jdbc -am -DskipTests -P get-jar-with-dependencies`, finding and adding a lib names `iotdb-jdbc-{version}-jar-with-dependencies.jar` under `iotdb/jdbc/target/`.
+5. Download [Sources](https://iotdb.apache.org/Download/), unzip it and compile the JDBC driver with the following command:
+ ```shell
+ mvn clean package -pl jdbc -am -DskipTests -P get-jar-with-dependencies
+ ```
+6. Find and add a lib named `apache-iotdb-jdbc-{version}-jar-with-dependencies.jar`, which should be under `jdbc/target/`, then select `Find Class`.
- 
+ 
-6. Edit the driver Settings
+8. Edit the driver Settings

-7. Open New DataBase Connection and select iotdb
+9. Open New DataBase Connection and select iotdb

-8. Edit JDBC Connection Settings
-
+10. Edit JDBC Connection Settings
+
```
JDBC URL: jdbc:iotdb://127.0.0.1:6667/
Username: root
@@ -75,10 +77,10 @@ DBeaver is a SQL client software application and a database administration tool.
```

-9. Test Connection
+11. Test Connection

-10. Enjoy IoTDB with DBeaver
+12. Enjoy IoTDB with DBeaver

diff --git a/docs/UserGuide/Ecosystem Integration/Grafana Plugin.md b/docs/UserGuide/Ecosystem Integration/Grafana Plugin.md
index 318df9573f5c2..998ab76b570f1 100644
--- a/docs/UserGuide/Ecosystem Integration/Grafana Plugin.md
+++ b/docs/UserGuide/Ecosystem Integration/Grafana Plugin.md
@@ -86,19 +86,20 @@ If compiling successful, you can see that the `distribution/target` directory co
#### Install Grafana-Plugin
-* Copy the front-end project target folder generated above to Grafana's plugin directory `${Grafana directory}\data\plugins\`
- * Windows: the `data\plugins` directory is automatically created
- * Linux: artificially create `/var/lib/grafana/plugins` directory
- * MacOS: the plugin directory is `/usr/local/var/lib/grafana/plugins` (see more details after installing Grafana using `brew install`)
+* Copy the front-end project target folder generated above to Grafana's plugin directory `${Grafana directory}\data\plugins\`. If there is no such directory, you can manually create it or start Grafana and it will be created automatically. Of course, you can also modify the location of plugins. For details, please refer to the following instructions for modifying the location of Grafana's plugin directory.
* Modify Grafana configuration file: the file is in(`${Grafana directory}\conf\defaults.ini`), and do the following modifications:
```ini
allow_loading_unsigned_plugins = iotdb
```
+* Modify the location of Grafana's plugin directory: the file is in(`${Grafana directory}\conf\defaults.ini`), and do the following modifications:
+ ```ini
+ plugins = data/plugins
+ ```
* Start Grafana (restart if the Grafana service is already started)
-
+ For more details, please click [here](https://grafana.com/docs/grafana/latest/plugins/installation/).
#### Start Grafana
diff --git a/docs/UserGuide/Maintenance-Tools/Metric-Tool.md b/docs/UserGuide/Maintenance-Tools/Metric-Tool.md
index 1758c1f667685..d7fa3ef22267a 100644
--- a/docs/UserGuide/Maintenance-Tools/Metric-Tool.md
+++ b/docs/UserGuide/Maintenance-Tools/Metric-Tool.md
@@ -78,116 +78,133 @@ Next, we will choose Prometheus format data as samples to describe each kind of
#### 4.3.1. API
-| Metric | Tag | level | Description | Sample |
-| ------------------- | --------------------- | ------ | ---------------------------------------- | -------------------------------------------- |
+| Metric | Tag | level | Description | Sample |
+| ------------------- | --------------------- | --------- | ---------------------------------------- | -------------------------------------------- |
| entry_seconds_count | name="interface name" | important | The total request count of the interface | entry_seconds_count{name="openSession",} 1.0 |
| entry_seconds_sum | name="interface name" | important | The total cost seconds of the interface | entry_seconds_sum{name="openSession",} 0.024 |
| entry_seconds_max | name="interface name" | important | The max latency of the interface | entry_seconds_max{name="openSession",} 0.024 |
| quantity_total | name="pointsIn" | important | The total points inserted into IoTDB | quantity_total{name="pointsIn",} 1.0 |
-#### 4.3.2. File
+#### 4.3.2. Task
+| Metric | Tag | level | Description | Sample |
+| ----------------------- | ----------------------------------------------------------------------------- | --------- | -------------------------------------------------------- | --------------------------------------------------------------------------------------- |
+| queue | name="compaction_inner/compaction_cross/flush", status="running/waiting" | important | The count of current tasks in running and waiting status | queue{name="flush",status="waiting",} 0.0 queue{name="flush",status="running",} 0.0 |
+| cost_task_seconds_count | name="compaction/flush" | important | The total count of tasks occurs till now | cost_task_seconds_count{name="flush",} 1.0 |
+| cost_task_seconds_max | name="compaction/flush" | important | The seconds of the longest task takes till now | cost_task_seconds_max{name="flush",} 0.363 |
+| cost_task_seconds_sum | name="compaction/flush" | important | The total cost seconds of all tasks till now | cost_task_seconds_sum{name="flush",} 0.363 |
+| data_written | name="compaction", type="aligned/not-aligned/total" | important | The size of data written in compaction | data_written{name="compaction",type="total",} 10240 |
+| data_read               | name="compaction"                                                             | important | The size of data read in compaction                       | data_read{name="compaction",} 10240                                                      |
-| Metric | Tag | level | Description | Sample |
-| ---------- | -------------------- | ------ | ----------------------------------------------- | --------------------------- |
-| file_size | name="wal/seq/unseq" | important | The current file size of wal/seq/unseq in bytes | file_size{name="wal",} 67.0 |
-| file_count | name="wal/seq/unseq" | important | The current count of wal/seq/unseq files | file_count{name="seq",} 1.0 |
-
-#### 4.3.3. Flush
-
-| Metric | Tag | level | Description | Sample |
-| ----------------------- | ------------------------------------------- | ------ | ----------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
-| queue | name="flush", status="running/waiting" | important | The count of current flushing tasks in running and waiting status | queue{name="flush",status="waiting",} 0.0 queue{name="flush",status="running",} 0.0 |
-| cost_task_seconds_count | name="flush" | important | The total count of flushing occurs till now | cost_task_seconds_count{name="flush",} 1.0 |
-| cost_task_seconds_max | name="flush" | important | The seconds of the longest flushing task takes till now | cost_task_seconds_max{name="flush",} 0.363 |
-| cost_task_seconds_sum | name="flush" | important | The total cost seconds of all flushing tasks till now | cost_task_seconds_sum{name="flush",} 0.363 |
+#### 4.3.3. Memory Usage
-#### 4.3.4. Compaction
-
-| Metric | Tag | level | Description | Sample |
-| ----------------------- | ----------------------------------------------------------------------- | ------ | ------------------------------------------------------------------- | ---------------------------------------------------- |
-| queue | name="compaction_inner/compaction_cross", status="running/waiting" | important | The count of current compaction tasks in running and waiting status | queue{name="compaction_inner",status="waiting",} 0.0 |
-| cost_task_seconds_count | name="compaction" | important | The total count of compaction occurs till now | cost_task_seconds_count{name="compaction",} 1.0 |
-| cost_task_seconds_max | name="compaction" | important | The seconds of the longest compaction task takes till now | cost_task_seconds_max{name="compaction",} 0.363 |
-| cost_task_seconds_sum | name="compaction" | important | The total cost seconds of all compaction tasks till now | cost_task_seconds_sum{name="compaction",} 0.363 |
-
-#### 4.3.5. Memory Usage
-
-| Metric | Tag | level | Description | Sample |
-| ------ | --------------------------------------- | ------ | --------------------------------------------------------------------- | --------------------------------- |
+| Metric | Tag | level | Description | Sample |
+| ------ | --------------------------------------- | --------- | --------------------------------------------------------------------- | --------------------------------- |
| mem | name="chunkMetaData/storageGroup/mtree" | important | Current memory size of chunkMetaData/storageGroup/mtree data in bytes | mem{name="chunkMetaData",} 2050.0 |
-#### 4.3.6. Cache Hit Ratio
+#### 4.3.4. Cache Hit Ratio
-| Metric | Tag | level | Description | Sample |
-| --------- | --------------------------------------- | ------ | ----------------------------------------------------------------------------- | --------------------------- |
+| Metric | Tag | level | Description | Sample |
+| --------- | --------------------------------------- | --------- | ----------------------------------------------------------------------------- | --------------------------- |
| cache_hit | name="chunk/timeSeriesMeta/bloomFilter" | important | Cache hit ratio of chunk/timeSeriesMeta and prevention ratio of bloom filter | cache_hit{name="chunk",} 80 |
-#### 4.3.7. Business Data
+#### 4.3.5. Business Data
-| Metric | Tag | level | Description | Sample |
-| -------- | ------------------------------------- | ------ | ------------------------------------------------------------- | -------------------------------- |
+| Metric | Tag | level | Description | Sample |
+| -------- | ------------------------------------- | --------- | ------------------------------------------------------------- | -------------------------------- |
| quantity | name="timeSeries/storageGroup/device" | important | The current count of timeSeries/storageGroup/devices in IoTDB | quantity{name="timeSeries",} 1.0 |
-#### 4.3.8. Cluster
+#### 4.3.6. Cluster
-| Metric | Tag | level | Description | Sample |
-| ------------------------- | ------------------------------- | ------ | -------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------- |
+| Metric | Tag | level | Description | Sample |
+| ------------------------- | ------------------------------- | --------- | -------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------- |
| cluster_node_leader_count | name="{{ip}}" | important | The count of ```dataGroupLeader``` on each node, which reflects the distribution of leaders | cluster_node_leader_count{name="127.0.0.1",} 2.0 |
| cluster_uncommitted_log | name="{{ip_datagroupHeader}}" | important | The count of ```uncommitted_log``` on each node in data groups it belongs to | cluster_uncommitted_log{name="127.0.0.1_Data-127.0.0.1-40010-raftId-0",} 0.0 |
| cluster_node_status | name="{{ip}}" | important | The current node status, 1=online 2=offline | cluster_node_status{name="127.0.0.1",} 1.0 |
| cluster_elect_total | name="{{ip}}",status="fail/win" | important | The count and result (won or failed) of elections the node participated in. | cluster_elect_total{name="127.0.0.1",status="win",} 1.0 |
### 4.4. IoTDB PreDefined Metrics Set
-Users can modify the value of `predefinedMetrics` in the `iotdb-metric.yml` file to enable the predefined set of metrics, which `LOGBACK` does not support in `dropwizard`.
+Users can modify the value of `predefinedMetrics` in the `iotdb-metric.yml` file to enable the predefined set of metrics, which now supports `JVM`, `LOGBACK`, `FILE`, `PROCESS`, `SYSTEM`.
#### 4.4.1. JVM
##### 4.4.1.1. Threads
-| Metric | Tag | Description | Sample |
-| -------------------------- | ------------------------------------------------------------- | ------------------------------------ | -------------------------------------------------- |
-| jvm_threads_live_threads | None | The current count of threads | jvm_threads_live_threads 25.0 |
-| jvm_threads_daemon_threads | None | The current count of daemon threads | jvm_threads_daemon_threads 12.0 |
-| jvm_threads_peak_threads | None | The max count of threads till now | jvm_threads_peak_threads 28.0 |
-| jvm_threads_states_threads | state="runnable/blocked/waiting/timed-waiting/new/terminated" | The count of threads in each status | jvm_threads_states_threads{state="runnable",} 10.0 |
+| Metric | Tag | level | Description | Sample |
+| -------------------------- | ------------------------------------------------------------- | --------- | ------------------------------------ | -------------------------------------------------- |
+| jvm_threads_live_threads | None | Important | The current count of threads | jvm_threads_live_threads 25.0 |
+| jvm_threads_daemon_threads | None | Important | The current count of daemon threads | jvm_threads_daemon_threads 12.0 |
+| jvm_threads_peak_threads | None | Important | The max count of threads till now | jvm_threads_peak_threads 28.0 |
+| jvm_threads_states_threads | state="runnable/blocked/waiting/timed-waiting/new/terminated" | Important | The count of threads in each status | jvm_threads_states_threads{state="runnable",} 10.0 |
##### 4.4.1.2. GC
-| Metric | Tag | Description | Sample |
-| ----------------------------------- | ------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
-| jvm_gc_pause_seconds_count | action="end of major GC/end of minor GC",cause="xxxx" | The total count of YGC/FGC events and its cause | jvm_gc_pause_seconds_count{action="end of major GC",cause="Metadata GC Threshold",} 1.0 |
-| jvm_gc_pause_seconds_sum | action="end of major GC/end of minor GC",cause="xxxx" | The total cost seconds of YGC/FGC and its cause | jvm_gc_pause_seconds_sum{action="end of major GC",cause="Metadata GC Threshold",} 0.03 |
-| jvm_gc_pause_seconds_max | action="end of major GC",cause="Metadata GC Threshold" | The max cost seconds of YGC/FGC till now and its cause | jvm_gc_pause_seconds_max{action="end of major GC",cause="Metadata GC Threshold",} 0.0 |
-| jvm_gc_overhead_percent | None | An approximation of the percent of CPU time used by GC activities over the last lookback period or since monitoring began, whichever is shorter, in the range [0..1] | jvm_gc_overhead_percent 0.0 |
-| jvm_gc_memory_promoted_bytes_total | None | Count of positive increases in the size of the old generation memory pool before GC to after GC | jvm_gc_memory_promoted_bytes_total 8425512.0 |
-| jvm_gc_max_data_size_bytes | None | Max size of long-lived heap memory pool | jvm_gc_max_data_size_bytes 2.863661056E9 |
-| jvm_gc_live_data_size_bytes | 无 | Size of long-lived heap memory pool after reclamation | jvm_gc_live_data_size_bytes 8450088.0 |
-| jvm_gc_memory_allocated_bytes_total | None | Incremented for an increase in the size of the (young) heap memory pool after one GC to before the next | jvm_gc_memory_allocated_bytes_total 4.2979144E7 |
+| Metric | Tag | level | Description | Sample |
+| ----------------------------------- | ------------------------------------------------------ | --------- | ------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
+| jvm_gc_pause_seconds_count | action="end of major GC/end of minor GC",cause="xxxx" | Important | The total count of YGC/FGC events and its cause | jvm_gc_pause_seconds_count{action="end of major GC",cause="Metadata GC Threshold",} 1.0 |
+| jvm_gc_pause_seconds_sum | action="end of major GC/end of minor GC",cause="xxxx" | Important | The total cost seconds of YGC/FGC and its cause | jvm_gc_pause_seconds_sum{action="end of major GC",cause="Metadata GC Threshold",} 0.03 |
+| jvm_gc_pause_seconds_max | action="end of major GC",cause="Metadata GC Threshold" | Important | The max cost seconds of YGC/FGC till now and its cause | jvm_gc_pause_seconds_max{action="end of major GC",cause="Metadata GC Threshold",} 0.0 |
+| jvm_gc_memory_promoted_bytes_total | None | Important | Count of positive increases in the size of the old generation memory pool before GC to after GC | jvm_gc_memory_promoted_bytes_total 8425512.0 |
+| jvm_gc_max_data_size_bytes | None | Important | Max size of long-lived heap memory pool | jvm_gc_max_data_size_bytes 2.863661056E9 |
+| jvm_gc_live_data_size_bytes | None | Important | Size of long-lived heap memory pool after reclamation | jvm_gc_live_data_size_bytes 8450088.0 |
+| jvm_gc_memory_allocated_bytes_total | None | Important | Incremented for an increase in the size of the (young) heap memory pool after one GC to before the next | jvm_gc_memory_allocated_bytes_total 4.2979144E7 |
##### 4.4.1.3. Memory
-| Metric | Tag | Description | Sample |
-| ------------------------------- | ------------------------------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| jvm_buffer_memory_used_bytes | id="direct/mapped" | An estimate of the memory that the Java virtual machine is using for this buffer pool | jvm_buffer_memory_used_bytes{id="direct",} 3.46728099E8 |
-| jvm_buffer_total_capacity_bytes | id="direct/mapped" | An estimate of the total capacity of the buffers in this pool | jvm_buffer_total_capacity_bytes{id="mapped",} 0.0 |
-| jvm_buffer_count_buffers | id="direct/mapped" | An estimate of the number of buffers in the pool | jvm_buffer_count_buffers{id="direct",} 183.0 |
-| jvm_memory_committed_bytes | {area="heap/nonheap",id="xxx",} | The amount of memory in bytes that is committed for the Java virtual machine to use | jvm_memory_committed_bytes{area="heap",id="Par Survivor Space",} 2.44252672E8 jvm_memory_committed_bytes{area="nonheap",id="Metaspace",} 3.9051264E7 |
-| jvm_memory_max_bytes | {area="heap/nonheap",id="xxx",} | The maximum amount of memory in bytes that can be used for memory management | jvm_memory_max_bytes{area="heap",id="Par Survivor Space",} 2.44252672E8 jvm_memory_max_bytes{area="nonheap",id="Compressed Class Space",} 1.073741824E9 |
-| jvm_memory_used_bytes | {area="heap/nonheap",id="xxx",} | The amount of used memory | jvm_memory_used_bytes{area="heap",id="Par Eden Space",} 1.000128376E9 jvm_memory_used_bytes{area="nonheap",id="Code Cache",} 2.9783808E7 |
+| Metric | Tag | level | Description | Sample |
+| ------------------------------- | ------------------------------- | --------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| jvm_buffer_memory_used_bytes | id="direct/mapped" | Important | An estimate of the memory that the Java virtual machine is using for this buffer pool | jvm_buffer_memory_used_bytes{id="direct",} 3.46728099E8 |
+| jvm_buffer_total_capacity_bytes | id="direct/mapped" | Important | An estimate of the total capacity of the buffers in this pool | jvm_buffer_total_capacity_bytes{id="mapped",} 0.0 |
+| jvm_buffer_count_buffers | id="direct/mapped" | Important | An estimate of the number of buffers in the pool | jvm_buffer_count_buffers{id="direct",} 183.0 |
+| jvm_memory_committed_bytes | {area="heap/nonheap",id="xxx",} | Important | The amount of memory in bytes that is committed for the Java virtual machine to use | jvm_memory_committed_bytes{area="heap",id="Par Survivor Space",} 2.44252672E8 jvm_memory_committed_bytes{area="nonheap",id="Metaspace",} 3.9051264E7 |
+| jvm_memory_max_bytes | {area="heap/nonheap",id="xxx",} | Important | The maximum amount of memory in bytes that can be used for memory management | jvm_memory_max_bytes{area="heap",id="Par Survivor Space",} 2.44252672E8 jvm_memory_max_bytes{area="nonheap",id="Compressed Class Space",} 1.073741824E9 |
+| jvm_memory_used_bytes | {area="heap/nonheap",id="xxx",} | Important | The amount of used memory | jvm_memory_used_bytes{area="heap",id="Par Eden Space",} 1.000128376E9 jvm_memory_used_bytes{area="nonheap",id="Code Cache",} 2.9783808E7 |
##### 4.4.1.4. Classes
-| Metric | Tag | Description | Sample |
-| ---------------------------------- | --------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- |
-| jvm_classes_unloaded_classes_total | 无 | The total number of classes unloaded since the Java virtual machine has started execution | jvm_classes_unloaded_classes_total 680.0 |
-| jvm_classes_loaded_classes | 无 | The number of classes that are currently loaded in the Java virtual machine | jvm_classes_loaded_classes 5975.0 |
-| jvm_compilation_time_ms_total | {compiler="HotSpot 64-Bit Tiered Compilers",} | The approximate accumulated elapsed time spent in compilation | jvm_compilation_time_ms_total{compiler="HotSpot 64-Bit Tiered Compilers",} 107092.0 |
+| Metric | Tag | level | Description | Sample |
+| ---------------------------------- | --------------------------------------------- | --------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- |
+| jvm_classes_unloaded_classes_total | None | Important | The total number of classes unloaded since the Java virtual machine has started execution | jvm_classes_unloaded_classes_total 680.0 |
+| jvm_classes_loaded_classes | None | Important | The number of classes that are currently loaded in the Java virtual machine | jvm_classes_loaded_classes 5975.0 |
+| jvm_compilation_time_ms_total | {compiler="HotSpot 64-Bit Tiered Compilers",} | Important | The approximate accumulated elapsed time spent in compilation | jvm_compilation_time_ms_total{compiler="HotSpot 64-Bit Tiered Compilers",} 107092.0 |
-#### 4.4.2. Log Events
+#### 4.4.2. File
-| Metric | Tag | Description | Sample |
-| -------------------- | -------------------------------------- | ------------------------------------------------------------- | --------------------------------------- |
-| logback_events_total | {level="trace/debug/info/warn/error",} | The count of trace/debug/info/warn/error log events till now | logback_events_total{level="warn",} 0.0 |
+| Metric | Tag | level | Description | Sample |
+| ---------- | -------------------- | --------- | ----------------------------------------------- | --------------------------- |
+| file_size  | name="wal/seq/unseq" | Important | The current file size of wal/seq/unseq in bytes | file_size{name="wal",} 67.0 |
+| file_count | name="wal/seq/unseq" | Important | The current count of wal/seq/unseq files | file_count{name="seq",} 1.0 |
+
+#### 4.4.3. Logback
+
+| Metric | Tag | level | Description | Sample |
+| -------------------- | -------------------------------------- | --------- | ------------------------------------------------------------- | --------------------------------------- |
+| logback_events_total | {level="trace/debug/info/warn/error",} | Important | The count of trace/debug/info/warn/error log events till now | logback_events_total{level="warn",} 0.0 |
+
+#### 4.4.4. Process
+| Metric | Tag | level | Description | Sample |
+| --------------------- | -------------- | ----- | ----------------------------------------------------------------------------- | ----------------------------------------------- |
+| process_cpu_load | name="cpu" | core | current process CPU Usage (%) | process_cpu_load{name="process",} 5.0 |
+| process_cpu_time | name="cpu" | core | total Process CPU Time Occupied (ns) | process_cpu_time{name="process",} 3.265625E9 |
+| process_max_mem | name="memory" | core | The maximum available memory for the JVM | process_max_mem{name="process",} 3.545759744E9 |
+| process_used_mem | name="memory" | core | The current available memory for the JVM | process_used_mem{name="process",} 4.6065456E7 |
+| process_total_mem | name="memory" | core | The current requested memory for the JVM | process_total_mem{name="process",} 2.39599616E8 |
+| process_free_mem | name="memory" | core | The free available memory for the JVM | process_free_mem{name="process",} 1.94035584E8 |
+| process_mem_ratio | name="memory" | core | Memory footprint ratio of process | process_mem_ratio{name="process",} 0.0 |
+| process_threads_count | name="process" | core | The current number of threads | process_threads_count{name="process",} 11.0 |
+| process_status | name="process" | core | The process survivor status, 1.0 means survivorship, and 0.0 means terminated | process_status{name="process",} 1.0 |
+
+#### 4.4.5. System
+| Metric | Tag | level | Description | Sample |
+| ------------------------------ | ------------- | --------- | ----------------------------------------------------------- | -------------------------------------------------------------- |
+| sys_cpu_load | name="cpu" | core | current system CPU Usage(%) | sys_cpu_load{name="system",} 15.0 |
+| sys_cpu_cores | name="cpu" | core | available CPU cores | sys_cpu_cores{name="system",} 16.0 |
+| sys_total_physical_memory_size | name="memory" | core | Maximum physical memory of system | sys_total_physical_memory_size{name="system",} 1.5950999552E10 |
+| sys_free_physical_memory_size | name="memory" | core | The current available memory of system | sys_free_physical_memory_size{name="system",} 4.532396032E9 |
+| sys_total_swap_space_size | name="memory" | core | The maximum swap area of system | sys_total_swap_space_size{name="system",} 2.1051273216E10 |
+| sys_free_swap_space_size | name="memory" | core | The available swap area of system | sys_free_swap_space_size{name="system",} 2.931576832E9 |
+| sys_committed_vm_size | name="memory" | important | the amount of virtual memory available to running processes | sys_committed_vm_size{name="system",} 5.04344576E8 |
+| sys_disk_total_space | name="disk" | core | The total disk space | sys_disk_total_space{name="system",} 5.10770798592E11 |
+| sys_disk_free_space | name="disk" | core | The available disk space | sys_disk_free_space{name="system",} 3.63467845632E11 |
### 4.5. Add custom metrics
- If you want to add your own metrics data in IoTDB, please see the [IoTDB Metric Framework] (https://github.com/apache/iotdb/tree/master/metrics) document.
@@ -215,6 +232,9 @@ The metrics collection switch is disabled by default,you need to enable it fro
# whether enable the module
enableMetric: false
+# Is stat performance of operation latency
+enablePerformanceStat: false
+
# Multiple reporter, options: [JMX, PROMETHEUS, IOTDB], IOTDB is off by default
metricReporterList:
- JMX
@@ -226,15 +246,22 @@ monitorType: MICROMETER
# Level of metric level, options: [CORE, IMPORTANT, NORMAL, ALL]
metricLevel: IMPORTANT
-# Predefined metric, options: [JVM, LOGBACK], LOGBACK are not supported in dropwizard
+# Predefined metric, options: [JVM, LOGBACK, FILE, PROCESS, SYSTEM]
predefinedMetrics:
- JVM
-
-# Period time of push, only used by IoTDB Reporter
-pushPeriodInSecond: 5
+ - FILE
# The http server's port for prometheus exporter to get metric data.
prometheusExporterPort: 9091
+
+# The config of iotdb reporter
+ioTDBReporterConfig:
+ host: 127.0.0.1
+ port: 6667
+ username: root
+ password: root
+ database: _metric
+ pushPeriodInSecond: 15
```
Then you can get metrics data as follows
@@ -320,8 +347,14 @@ The following documents may help you have a good journey with Prometheus and Gra
[Grafana query metrics from Prometheus](https://prometheus.io/docs/visualization/grafana/#grafana-support-for-prometheus)
-Here are two demo pictures of IoTDB's metrics data in Grafana.
+### 5.3. Apache IoTDB Dashboard
+We provide the Apache IoTDB Dashboard, and the rendering shown in Grafana is as follows:
+
+
+
+How to get Apache IoTDB Dashboard:
-
+1. You can obtain the JSON files of Dashboards corresponding to different IoTDB versions in the grafana-metrics-example folder.
+2. You can visit the [Grafana Dashboard official website](https://grafana.com/grafana/dashboards/), search for `Apache IoTDB Dashboard`, and use it directly.
-
\ No newline at end of file
+When importing a dashboard into Grafana, you can select the JSON file you just downloaded via `Import` and select the corresponding target data source for the Apache IoTDB Dashboard.
\ No newline at end of file
diff --git a/docs/UserGuide/Process-Data/Alerting.md b/docs/UserGuide/Process-Data/Alerting.md
index 1a4fd3a0bb8d4..3e2f2530ae985 100644
--- a/docs/UserGuide/Process-Data/Alerting.md
+++ b/docs/UserGuide/Process-Data/Alerting.md
@@ -353,7 +353,7 @@ whose operation logic is defined
by `org.apache.iotdb.trigger.AlertingExample` java class.
``` sql
- CREATE TRIGGER root-ln-wf01-wt01-alert
+ CREATE TRIGGER `root-ln-wf01-wt01-alert`
AFTER INSERT
ON root.ln.wf01.wt01.temperature
AS "org.apache.iotdb.trigger.AlertingExample"
diff --git a/docs/UserGuide/Process-Data/Select-Into.md b/docs/UserGuide/Process-Data/Select-Into.md
index 3e86c201b6271..1754d10588ef6 100644
--- a/docs/UserGuide/Process-Data/Select-Into.md
+++ b/docs/UserGuide/Process-Data/Select-Into.md
@@ -235,11 +235,11 @@ When the target aligned timeseries are not existed, the system will automaticall
### Other Restrictions
* The number of source series in the `select` clause and the number of target series in the `into` clause must be the same.
-* The `select *` clause is not supported.
-* The target series in the `into` clause do not need to be created in advance.
-* When the target series in the `into` clause already exist, you need to ensure that the source series in the `select` clause and the target series in the `into` clause have the same data types.
+* The `select *` and `select **` clause are not supported.
+* The target series in the `into` clause do not need to be created in advance. When the target series in the `into` clause already exist, you need to ensure that the source series in the `select` clause and the target series in the `into` clause have the same data types.
* The target series in the `into` clause must be different from each other.
* Only one prefix path of a series is allowed in the `from` clause.
+* `*` and `**` are not allowed in the `from` clause.
* Aligned Timeseries has not been supported in Time series generating function query(including UDF query)/ Arithmetic query / Nested query yet. An error message is expected if you use these types of query with Aligned Timeseries selected in the `select` clause.
diff --git a/docs/UserGuide/QuickStart/WayToGetIoTDB.md b/docs/UserGuide/QuickStart/WayToGetIoTDB.md
index 8f0c2829ac586..9f171bedd0df2 100644
--- a/docs/UserGuide/QuickStart/WayToGetIoTDB.md
+++ b/docs/UserGuide/QuickStart/WayToGetIoTDB.md
@@ -51,7 +51,13 @@ You can download the source code from:
git clone https://github.com/apache/iotdb.git
```
-Under the root path of iotdb:
+After that, go to the root path of IoTDB. If you want to build the version that we have released, you need to create and check out a new branch by command `git checkout -b my_{project.version} v{project.version}`. E.g., if you want to build the version `0.12.4`, you can execute this command to make it:
+
+```shell
+> git checkout -b my_0.12.4 v0.12.4
+```
+
+Then you can execute this command to build the version that you want:
```
> mvn clean package -DskipTests
@@ -69,6 +75,15 @@ If you would like to build the IoTDB server, you can run the following command u
After build, the IoTDB server will be at the folder "server/target/iotdb-server-{project.version}".
+If you would like to build a module, you can execute command `mvn clean package -pl {module.name} -am -DskipTests` under the root path of IoTDB.
+If you need the jar with dependencies, you can add parameter `-P get-jar-with-dependencies` after the command. E.g., If you need the jar of jdbc with dependencies, you can execute this command:
+
+```shell
+> mvn clean package -pl jdbc -am -DskipTests -P get-jar-with-dependencies
+```
+
+Then you can find it under the path `{module.name}/target`.
+
### Installation by Docker (Dockerfile)
Apache IoTDB' Docker image is released on [https://hub.docker.com/r/apache/iotdb](https://hub.docker.com/r/apache/iotdb),
diff --git a/docs/zh/UserGuide/API/Programming-Java-Native-API.md b/docs/zh/UserGuide/API/Programming-Java-Native-API.md
index 5edd930d1fe83..1f7e3785047ba 100644
--- a/docs/zh/UserGuide/API/Programming-Java-Native-API.md
+++ b/docs/zh/UserGuide/API/Programming-Java-Native-API.md
@@ -498,7 +498,8 @@ void testInsertTablets(Map tablets)
如果超过 60s 都没得到一个连接的话,那么会打印一条警告日志,但是程序仍将继续等待。
当一个连接被用完后,他会自动返回池中等待下次被使用;
-当一个连接损坏后,他会从池中被删除,并重建一个连接重新执行用户的操作。
+当一个连接损坏后,他会从池中被删除,并重建一个连接重新执行用户的操作;
+你还可以像创建 Session 那样在创建 SessionPool 时指定多个可连接节点的 url,以保证分布式集群中客户端的高可用性。
对于查询操作:
diff --git a/docs/zh/UserGuide/API/Programming-Python-Native-API.md b/docs/zh/UserGuide/API/Programming-Python-Native-API.md
index d62d6d082f64a..d15bb830e2b3f 100644
--- a/docs/zh/UserGuide/API/Programming-Python-Native-API.md
+++ b/docs/zh/UserGuide/API/Programming-Python-Native-API.md
@@ -27,7 +27,9 @@
## 如何使用 (示例)
-首先下载包:`pip3 install apache-iotdb`
+首先下载最新安装包:`pip3 install apache-iotdb`
+
+*注意:如果您想要安装 0.13.0 版本的 Python API,不要使用 `pip install apache-iotdb==0.13.0`,请使用 `pip install apache-iotdb==0.13.0.post1` 作为替代!*
您可以从这里得到一个使用该包进行数据读写的例子:[Session Example](https://github.com/apache/iotdb/blob/master/client-py/SessionExample.py)
@@ -42,8 +44,8 @@ from iotdb.Session import Session
ip = "127.0.0.1"
port_ = "6667"
-username_ = 'root'
-password_ = 'root'
+username_ = "root"
+password_ = "root"
session = Session(ip, port_, username_, password_)
session.open(False)
zone = session.get_time_zone()
@@ -97,20 +99,20 @@ session.delete_storage_groups(group_name_lst)
```python
session.create_time_series(ts_path, data_type, encoding, compressor,
- props=None, tags=None, attributes=None, alias=None)
+ props=None, tags=None, attributes=None, alias=None)
session.create_multi_time_series(
- ts_path_lst, data_type_lst, encoding_lst, compressor_lst,
- props_lst=None, tags_lst=None, attributes_lst=None, alias_lst=None
- )
+ ts_path_lst, data_type_lst, encoding_lst, compressor_lst,
+ props_lst=None, tags_lst=None, attributes_lst=None, alias_lst=None
+)
```
* 创建对齐时间序列
```python
session.create_aligned_time_series(
- device_id, measurements_lst, data_type_lst, encoding_lst, compressor_lst
- )
+ device_id, measurements_lst, data_type_lst, encoding_lst, compressor_lst
+)
```
注意:目前**暂不支持**使用传感器别名。
@@ -156,13 +158,12 @@ session.insert_tablet(tablet_)
```
* Numpy Tablet
-相较于普通 Tablet,Numpy Tablet 使用 [numpy ndarray](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) 来记录数值型数据。
+相较于普通 Tablet,Numpy Tablet 使用 [numpy.ndarray](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) 来记录数值型数据。
内存占用和序列化耗时会降低很多,写入效率也会有很大提升。
**注意**
-1. Tablet 中的每一列值记录为一个 ndarray
-2. ndarray 需要为大端类型的数据类型,具体可参考下面的例子
-3. TEXT 类型数据不支持 ndarray
+1. Tablet 中的每一列时间戳和值记录为一个 ndarray
+2. ndarray 推荐使用如下面例子中的特定的 dtype,如果不使用,不会影响正确性。
```python
data_types_ = [
@@ -174,16 +175,16 @@ data_types_ = [
TSDataType.TEXT,
]
np_values_ = [
- np.array([False, True, False, True], np.dtype('>?')),
- np.array([10, 100, 100, 0], np.dtype('>i4')),
- np.array([11, 11111, 1, 0], np.dtype('>i8')),
- np.array([1.1, 1.25, 188.1, 0], np.dtype('>f4')),
- np.array([10011.1, 101.0, 688.25, 6.25], np.dtype('>f8')),
- ["test01", "test02", "test03", "test04"],
+ np.array([False, True, False, True], TSDataType.BOOLEAN.np_dtype()),
+ np.array([10, 100, 100, 0], TSDataType.INT32.np_dtype()),
+ np.array([11, 11111, 1, 0], TSDataType.INT64.np_dtype()),
+ np.array([1.1, 1.25, 188.1, 0], TSDataType.FLOAT.np_dtype()),
+ np.array([10011.1, 101.0, 688.25, 6.25], TSDataType.DOUBLE.np_dtype()),
+ np.array(["test01", "test02", "test03", "test04"], TSDataType.TEXT.np_dtype()),
]
-np_timestamps_ = np.array([1, 2, 3, 4], np.dtype('>i8'))
+np_timestamps_ = np.array([1, 2, 3, 4], TSDataType.INT64.np_dtype())
np_tablet_ = NumpyTablet(
- "root.sg_test_01.d_02", measurements_, data_types_, np_values_, np_timestamps_
+ "root.sg_test_01.d_02", measurements_, data_types_, np_values_, np_timestamps_
)
session.insert_tablet(np_tablet_)
```
@@ -260,8 +261,8 @@ from iotdb.Session import Session
ip = "127.0.0.1"
port_ = "6667"
-username_ = 'root'
-password_ = 'root'
+username_ = "root"
+password_ = "root"
session = Session(ip, port_, username_, password_)
session.open(False)
result = session.execute_query_statement("SELECT ** FROM root")
@@ -286,7 +287,7 @@ class MyTestCase(unittest.TestCase):
def test_something(self):
with IoTDBContainer() as c:
- session = Session('localhost', c.get_exposed_port(6667), 'root', 'root')
+ session = Session("localhost", c.get_exposed_port(6667), "root", "root")
session.open(False)
result = session.execute_query_statement("SHOW TIMESERIES")
print(result)
@@ -338,8 +339,8 @@ from iotdb.Session import Session
ip = "127.0.0.1"
port_ = "6667"
-username_ = 'root'
-password_ = 'root'
+username_ = "root"
+password_ = "root"
session = Session(ip, port_, username_, password_)
session.open(False)
zone = session.get_time_zone()
diff --git a/docs/zh/UserGuide/Ecosystem Integration/DBeaver.md b/docs/zh/UserGuide/Ecosystem Integration/DBeaver.md
index d35d56f4a950b..26b198dd47bfc 100644
--- a/docs/zh/UserGuide/Ecosystem Integration/DBeaver.md
+++ b/docs/zh/UserGuide/Ecosystem Integration/DBeaver.md
@@ -51,23 +51,25 @@ DBeaver 是一个 SQL 客户端和数据库管理工具。DBeaver 可以使用 I

-5. 添加下图中的这些库,点击 `Find Class`
-
- 也可以在源代码运行`mvn clean package -pl jdbc -am -DskipTests -P get-jar-with-dependencies`后,在`iotdb/jdbc/target/` 下找到并添加名为`iotdb-jdbc-{version}-jar-with-dependencies.jar`的库。
-
+5. 下载[源代码](https://iotdb.apache.org/zh/Download/),解压并运行下面的命令编译 jdbc 驱动
- 
+ ```shell
+ mvn clean package -pl jdbc -am -DskipTests -P get-jar-with-dependencies
+ ```
+7. 在`jdbc/target/`下找到并添加名为`apache-iotdb-jdbc-{version}-jar-with-dependencies.jar`的库,点击 `Find Class`。
-6. 编辑驱动设置
+ 
- 
+8. 编辑驱动设置
+
+ 
-7. 新建 DataBase Connection, 选择 iotdb
+9. 新建 DataBase Connection, 选择 iotdb
+
+ 
- 
+10. 编辑 JDBC 连接设置
-8. 编辑 JDBC 连接设置
-
```
JDBC URL: jdbc:iotdb://127.0.0.1:6667/
Username: root
@@ -75,10 +77,10 @@ DBeaver 是一个 SQL 客户端和数据库管理工具。DBeaver 可以使用 I
```

-9. 测试连接
+11. 测试连接

-10. 可以开始通过 DBeaver 使用 IoTDB
+12. 可以开始通过 DBeaver 使用 IoTDB

diff --git a/docs/zh/UserGuide/Ecosystem Integration/Grafana Plugin.md b/docs/zh/UserGuide/Ecosystem Integration/Grafana Plugin.md
index 7d5088b1aca5e..c40913c8c9f51 100644
--- a/docs/zh/UserGuide/Ecosystem Integration/Grafana Plugin.md
+++ b/docs/zh/UserGuide/Ecosystem Integration/Grafana Plugin.md
@@ -96,19 +96,20 @@ yarn build
#### grafana-plugin 插件安装
-* 拷贝上述生成的前端工程目标文件夹到 Grafana 的插件目录中 `${Grafana文件目录}\data\plugins\`
- * Windows 系统,启动 Grafana 后会自动创建 `data\plugins` 目录
- * Linux 系统,plugins 目录需要手动创建 `/var/lib/grafana/plugins`
- * MacOS,plugins 目录在`/usr/local/var/lib/grafana/plugins`(具体位置参看使用 `brew install`安装 Grafana 后的命令行输出提示)
+* 拷贝上述生成的前端工程目标文件夹到 Grafana 的插件目录中 `${Grafana文件目录}\data\plugins\`。如果没有此目录可以手动建或者启动grafana会自动建立,当然也可以修改plugins的位置,具体请查看下面的修改Grafana 的插件目录位置说明。
* 修改Grafana的配置文件:找到配置文件(`${Grafana文件目录}\conf\defaults.ini`),并进行如下的修改:
```ini
allow_loading_unsigned_plugins = iotdb
```
+* 修改Grafana 的插件目录位置:找到配置文件(`${Grafana文件目录}\conf\defaults.ini`),并进行如下的修改:
+ ```ini
+ plugins = data/plugins
+ ```
* 如果 Grafana 服务已启动,则需要重启服务。
-
+更多详情,请点 [这里](https://grafana.com/docs/grafana/latest/plugins/installation/)
#### 启动 Grafana
diff --git a/docs/zh/UserGuide/Maintenance-Tools/Metric-Tool.md b/docs/zh/UserGuide/Maintenance-Tools/Metric-Tool.md
index 7df061f140514..44be673b9c05f 100644
--- a/docs/zh/UserGuide/Maintenance-Tools/Metric-Tool.md
+++ b/docs/zh/UserGuide/Maintenance-Tools/Metric-Tool.md
@@ -31,12 +31,12 @@
系统变慢几乎是最常见也最头疼的问题,这时候我们需要尽可能多的信息来帮助我们找到系统变慢的原因,比如:
- - JVM信息:是不是有FGC?GC耗时多少?GC后内存有没有恢复?是不是有大量的线程?
- - 系统信息:CPU使用率是不是太高了?磁盘IO是不是很频繁?
- - 连接数:当前连接是不是太多?
- - 接口:当前TPS是多少?各个接口耗时有没有变化?
- - 线程池:系统中各种任务是否有积压?
- - 缓存命中率
+ - JVM信息:是不是有FGC?GC耗时多少?GC后内存有没有恢复?是不是有大量的线程?
+ - 系统信息:CPU使用率是不是太高了?磁盘IO是不是很频繁?
+ - 连接数:当前连接是不是太多?
+ - 接口:当前TPS是多少?各个接口耗时有没有变化?
+ - 线程池:系统中各种任务是否有积压?
+ - 缓存命中率
2. 磁盘快满了
@@ -76,132 +76,152 @@ IoTDB对外提供JMX和Prometheus格式的监控指标,对于JMX,可以通
#### 4.3.1. 接入层
-| Metric | Tag | level | 说明 | 示例 |
-| ------------------- | --------------- | ------ | ---------------- | -------------------------------------------- |
+| Metric | Tag | level | 说明 | 示例 |
+| ------------------- | --------------- | --------- | ---------------- | -------------------------------------------- |
| entry_seconds_count | name="接口名" | important | 接口累计访问次数 | entry_seconds_count{name="openSession",} 1.0 |
| entry_seconds_sum | name="接口名" | important | 接口累计耗时(s) | entry_seconds_sum{name="openSession",} 0.024 |
| entry_seconds_max | name="接口名" | important | 接口最大耗时(s) | entry_seconds_max{name="openSession",} 0.024 |
| quantity_total | name="pointsIn" | important | 系统累计写入点数 | quantity_total{name="pointsIn",} 1.0 |
-#### 4.3.2. 文件
+#### 4.3.2. Task
-| Metric | Tag | level | 说明 | 示例 |
-| ---------- | -------------------- | ------ | ----------------------------------- | --------------------------- |
-| file_size | name="wal/seq/unseq" | important | 当前时间wal/seq/unseq文件大小(byte) | file_size{name="wal",} 67.0 |
-| file_count | name="wal/seq/unseq" | important | 当前时间wal/seq/unseq文件个数 | file_count{name="seq",} 1.0 |
-
-#### 4.3.3. Flush
-
-| Metric | Tag | level | 说明 | 示例 |
-| ----------------------- | ------------------------------------------- | ------ | -------------------------------- | --------------------------------------------------------------------------------------- |
-| queue | name="flush", status="running/waiting" | important | 当前时间flush任务数 | queue{name="flush",status="waiting",} 0.0 queue{name="flush",status="running",} 0.0 |
-| cost_task_seconds_count | name="flush" | important | flush累计发生次数 | cost_task_seconds_count{name="flush",} 1.0 |
-| cost_task_seconds_max | name="flush" | important | 到目前为止flush耗时(s)最大的一次 | cost_task_seconds_max{name="flush",} 0.363 |
-| cost_task_seconds_sum | name="flush" | important | flush累计耗时(s) | cost_task_seconds_sum{name="flush",} 0.363 |
+| Metric | Tag | level | 说明 | 示例 |
+| ----------------------- | ----------------------------------------------------------------------------- | --------- | ------------------------------- | -------------------------------------------------------------------------------------------------- |
+| queue | name="compaction_inner/compaction_cross/flush", status="running/waiting" | important | 当前时间任务数 | queue{name="flush",status="waiting",} 0.0 queue{name="compaction/flush",status="running",} 0.0 |
+| cost_task_seconds_count | name="compaction/flush" | important | 任务累计发生次数 | cost_task_seconds_count{name="flush",} 1.0 |
+| cost_task_seconds_max | name="compaction/flush" | important | 到目前为止任务耗时(s)最大的一次 | cost_task_seconds_max{name="flush",} 0.363 |
+| cost_task_seconds_sum | name="compaction/flush" | important | 任务累计耗时(s) | cost_task_seconds_sum{name="flush",} 0.363 |
+| data_written | name="compaction", type="aligned/not-aligned/total" | important | 合并文件时写入量 | data_written{name="compaction",type="total",} 10240 |
+| data_read | name="compaction" | important | 合并文件时的读取量 | data_read{name="compaction",} 10240 |
-#### 4.3.4. Compaction
+#### 4.3.3. 内存占用
-| Metric | Tag | level | 说明 | 示例 |
-| ----------------------- | ----------------------------------------------------------------------- | ------ | ------------------------------------- | ---------------------------------------------------- |
-| queue | name="compaction_inner/compaction_cross", status="running/waiting" | important | 当前时间compaction任务数 | queue{name="compaction_inner",status="waiting",} 0.0 |
-| cost_task_seconds_count | name="compaction" | important | compaction累计发生次数 | cost_task_seconds_count{name="compaction",} 1.0 |
-| cost_task_seconds_max | name="compaction" | important | 到目前为止compaction耗时(s)最大的一次 | cost_task_seconds_max{name="compaction",} 0.363 |
-| cost_task_seconds_sum | name="compaction" | important | compaction累计耗时(s) | cost_task_seconds_sum{name="compaction",} 0.363 |
-
-#### 4.3.5. 内存占用
-
-| Metric | Tag | 说明 | level | 示例 |
-| ------ | --------------------------------------- | ------ | -------------------------------------------------- | --------------------------------- |
+| Metric | Tag | level | 说明 | 示例 |
+| ------ | --------------------------------------- | --------- | -------------------------------------------------- | --------------------------------- |
| mem | name="chunkMetaData/storageGroup/mtree" | important | chunkMetaData/storageGroup/mtree占用的内存(byte) | mem{name="chunkMetaData",} 2050.0 |
-#### 4.3.6. 缓存命中率
+#### 4.3.4. 缓存命中率
-| Metric | Tag | level | 说明 | 示例 |
-| --------- | --------------------------------------- | ------ | ------------------------------------------------ | --------------------------- |
+| Metric | Tag | level | 说明 | 示例 |
+| --------- | --------------------------------------- | --------- | ------------------------------------------------ | --------------------------- |
| cache_hit | name="chunk/timeSeriesMeta/bloomFilter" | important | chunk/timeSeriesMeta缓存命中率,bloomFilter拦截率 | cache_hit{name="chunk",} 80 |
-#### 4.3.7. 业务数据
+#### 4.3.5. 业务数据
-| Metric | Tag | level | 说明 | 示例 |
-| -------- | ------------------------------------- | ------ | -------------------------------------------- | -------------------------------- |
+| Metric | Tag | level | 说明 | 示例 |
+| -------- | ------------------------------------- | --------- | -------------------------------------------- | -------------------------------- |
| quantity | name="timeSeries/storageGroup/device" | important | 当前时间timeSeries/storageGroup/device的数量 | quantity{name="timeSeries",} 1.0 |
-#### 4.3.8. 集群
+#### 4.3.6. 集群
-| Metric | Tag | level | 说明 | 示例 |
-| ------------------------- | ------------------------------- | ------ | ------------------------------------------------------------- | ---------------------------------------------------------------------------- |
+| Metric | Tag | level | 说明 | 示例 |
+| ------------------------- | ------------------------------- | --------- | ------------------------------------------------------------- | ---------------------------------------------------------------------------- |
| cluster_node_leader_count | name="{{ip}}" | important | 节点上```dataGroupLeader```的数量,用来观察leader是否分布均匀 | cluster_node_leader_count{name="127.0.0.1",} 2.0 |
| cluster_uncommitted_log | name="{{ip_datagroupHeader}}" | important | 节点```uncommitted_log```的数量 | cluster_uncommitted_log{name="127.0.0.1_Data-127.0.0.1-40010-raftId-0",} 0.0 |
| cluster_node_status | name="{{ip}}" | important | 节点状态,1=online 2=offline | cluster_node_status{name="127.0.0.1",} 1.0 |
| cluster_elect_total | name="{{ip}}",status="fail/win" | important | 节点参与选举的次数及结果 | cluster_elect_total{name="127.0.0.1",status="win",} 1.0 |
### 4.4. IoTDB 预定义指标集
-用户可以在`iotdb-metric.yml`文件中,修改`predefinedMetrics`的值来启用预定义指标集,其中`LOGBACK`在`dropwizard`中不支持。
+
+用户可以在`iotdb-metric.yml`文件中,修改`predefinedMetrics`的值来启用预定义指标集,目前有`JVM`、`LOGBACK`、`FILE`、`PROCESS`、`SYSTEM`这五种。
#### 4.4.1. JVM
##### 4.4.1.1. 线程
-| Metric | Tag | 说明 | 示例 |
-| -------------------------- | ------------------------------------------------------------- | ------------------------ | -------------------------------------------------- |
-| jvm_threads_live_threads | 无 | 当前线程数 | jvm_threads_live_threads 25.0 |
-| jvm_threads_daemon_threads | 无 | 当前daemon线程数 | jvm_threads_daemon_threads 12.0 |
-| jvm_threads_peak_threads | 无 | 峰值线程数 | jvm_threads_peak_threads 28.0 |
-| jvm_threads_states_threads | state="runnable/blocked/waiting/timed-waiting/new/terminated" | 当前处于各种状态的线程数 | jvm_threads_states_threads{state="runnable",} 10.0 |
+| Metric | Tag | level | 说明 | 示例 |
+| -------------------------- | ------------------------------------------------------------- | --------- | ------------------------ | -------------------------------------------------- |
+| jvm_threads_live_threads | 无 | important | 当前线程数 | jvm_threads_live_threads 25.0 |
+| jvm_threads_daemon_threads | 无 | important | 当前daemon线程数 | jvm_threads_daemon_threads 12.0 |
+| jvm_threads_peak_threads | 无 | important | 峰值线程数 | jvm_threads_peak_threads 28.0 |
+| jvm_threads_states_threads | state="runnable/blocked/waiting/timed-waiting/new/terminated" | important | 当前处于各种状态的线程数 | jvm_threads_states_threads{state="runnable",} 10.0 |
##### 4.4.1.2. 垃圾回收
-| Metric | Tag | 说明 | 示例 |
-| ----------------------------------- | ------------------------------------------------------ | -------------------------------------------- | --------------------------------------------------------------------------------------- |
-| jvm_gc_pause_seconds_count | action="end of major GC/end of minor GC",cause="xxxx" | YGC/FGC发生次数及其原因 | jvm_gc_pause_seconds_count{action="end of major GC",cause="Metadata GC Threshold",} 1.0 |
-| jvm_gc_pause_seconds_sum | action="end of major GC/end of minor GC",cause="xxxx" | YGC/FGC累计耗时及其原因 | jvm_gc_pause_seconds_sum{action="end of major GC",cause="Metadata GC Threshold",} 0.03 |
-| jvm_gc_pause_seconds_max | action="end of major GC",cause="Metadata GC Threshold" | YGC/FGC最大耗时及其原因 | jvm_gc_pause_seconds_max{action="end of major GC",cause="Metadata GC Threshold",} 0.0 |
-| jvm_gc_overhead_percent | 无 | GC消耗cpu的比例 | jvm_gc_overhead_percent 0.0 |
-| jvm_gc_memory_promoted_bytes_total | 无 | 从GC之前到GC之后老年代内存池大小正增长的累计 | jvm_gc_memory_promoted_bytes_total 8425512.0 |
-| jvm_gc_max_data_size_bytes | 无 | 老年代内存的历史最大值 | jvm_gc_max_data_size_bytes 2.863661056E9 |
-| jvm_gc_live_data_size_bytes | 无 | GC后老年代内存的大小 | jvm_gc_live_data_size_bytes 8450088.0 |
-| jvm_gc_memory_allocated_bytes_total | 无 | 在一个GC之后到下一个GC之前年轻代增加的内存 | jvm_gc_memory_allocated_bytes_total 4.2979144E7 |
+| Metric | Tag | level | 说明 | 示例 |
+| ----------------------------------- | ------------------------------------------------------ | --------- | -------------------------------------------- | --------------------------------------------------------------------------------------- |
+| jvm_gc_pause_seconds_count | action="end of major GC/end of minor GC",cause="xxxx" | important | YGC/FGC发生次数及其原因 | jvm_gc_pause_seconds_count{action="end of major GC",cause="Metadata GC Threshold",} 1.0 |
+| jvm_gc_pause_seconds_sum | action="end of major GC/end of minor GC",cause="xxxx" | important | YGC/FGC累计耗时及其原因 | jvm_gc_pause_seconds_sum{action="end of major GC",cause="Metadata GC Threshold",} 0.03 |
+| jvm_gc_pause_seconds_max | action="end of major GC",cause="Metadata GC Threshold" | important | YGC/FGC最大耗时及其原因 | jvm_gc_pause_seconds_max{action="end of major GC",cause="Metadata GC Threshold",} 0.0 |
+| jvm_gc_memory_promoted_bytes_total | 无 | important | 从GC之前到GC之后老年代内存池大小正增长的累计 | jvm_gc_memory_promoted_bytes_total 8425512.0 |
+| jvm_gc_max_data_size_bytes | 无 | important | 老年代内存的历史最大值 | jvm_gc_max_data_size_bytes 2.863661056E9 |
+| jvm_gc_live_data_size_bytes | 无 | important | GC后老年代内存的大小 | jvm_gc_live_data_size_bytes 8450088.0 |
+| jvm_gc_memory_allocated_bytes_total | 无 | important | 在一个GC之后到下一个GC之前年轻代增加的内存 | jvm_gc_memory_allocated_bytes_total 4.2979144E7 |
##### 4.4.1.3. 内存
-| Metric | Tag | 说明 | 示例 |
-| ------------------------------- | ------------------------------- | ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| jvm_buffer_memory_used_bytes | id="direct/mapped" | 已经使用的缓冲区大小 | jvm_buffer_memory_used_bytes{id="direct",} 3.46728099E8 |
-| jvm_buffer_total_capacity_bytes | id="direct/mapped" | 最大缓冲区大小 | jvm_buffer_total_capacity_bytes{id="mapped",} 0.0 |
-| jvm_buffer_count_buffers | id="direct/mapped" | 当前缓冲区数量 | jvm_buffer_count_buffers{id="direct",} 183.0 |
-| jvm_memory_committed_bytes | {area="heap/nonheap",id="xxx",} | 当前向JVM申请的内存大小 | jvm_memory_committed_bytes{area="heap",id="Par Survivor Space",} 2.44252672E8 jvm_memory_committed_bytes{area="nonheap",id="Metaspace",} 3.9051264E7 |
-| jvm_memory_max_bytes | {area="heap/nonheap",id="xxx",} | JVM最大内存 | jvm_memory_max_bytes{area="heap",id="Par Survivor Space",} 2.44252672E8 jvm_memory_max_bytes{area="nonheap",id="Compressed Class Space",} 1.073741824E9 |
-| jvm_memory_used_bytes | {area="heap/nonheap",id="xxx",} | JVM已使用内存大小 | jvm_memory_used_bytes{area="heap",id="Par Eden Space",} 1.000128376E9 jvm_memory_used_bytes{area="nonheap",id="Code Cache",} 2.9783808E7 |
+| Metric | Tag | level | 说明 | 示例 |
+| ------------------------------- | ------------------------------- | --------- | ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| jvm_buffer_memory_used_bytes | id="direct/mapped" | important | 已经使用的缓冲区大小 | jvm_buffer_memory_used_bytes{id="direct",} 3.46728099E8 |
+| jvm_buffer_total_capacity_bytes | id="direct/mapped" | important | 最大缓冲区大小 | jvm_buffer_total_capacity_bytes{id="mapped",} 0.0 |
+| jvm_buffer_count_buffers | id="direct/mapped" | important | 当前缓冲区数量 | jvm_buffer_count_buffers{id="direct",} 183.0 |
+| jvm_memory_committed_bytes | {area="heap/nonheap",id="xxx",} | important | 当前向JVM申请的内存大小 | jvm_memory_committed_bytes{area="heap",id="Par Survivor Space",} 2.44252672E8 jvm_memory_committed_bytes{area="nonheap",id="Metaspace",} 3.9051264E7 |
+| jvm_memory_max_bytes | {area="heap/nonheap",id="xxx",} | important | JVM最大内存 | jvm_memory_max_bytes{area="heap",id="Par Survivor Space",} 2.44252672E8 jvm_memory_max_bytes{area="nonheap",id="Compressed Class Space",} 1.073741824E9 |
+| jvm_memory_used_bytes | {area="heap/nonheap",id="xxx",} | important | JVM已使用内存大小 | jvm_memory_used_bytes{area="heap",id="Par Eden Space",} 1.000128376E9 jvm_memory_used_bytes{area="nonheap",id="Code Cache",} 2.9783808E7 |
##### 4.4.1.4. Classes
-| Metric | Tag | 说明 | 示例 |
-| ---------------------------------- | --------------------------------------------- | ---------------------- | ----------------------------------------------------------------------------------- |
-| jvm_classes_unloaded_classes_total | 无 | jvm累计卸载的class数量 | jvm_classes_unloaded_classes_total 680.0 |
-| jvm_classes_loaded_classes | 无 | jvm累计加载的class数量 | jvm_classes_loaded_classes 5975.0 |
-| jvm_compilation_time_ms_total | {compiler="HotSpot 64-Bit Tiered Compilers",} | jvm耗费在编译上的时间 | jvm_compilation_time_ms_total{compiler="HotSpot 64-Bit Tiered Compilers",} 107092.0 |
+| Metric | Tag | level | 说明 | 示例 |
+| ---------------------------------- | --------------------------------------------- | --------- | ---------------------- | ----------------------------------------------------------------------------------- |
+| jvm_classes_unloaded_classes_total | 无 | important | jvm累计卸载的class数量 | jvm_classes_unloaded_classes_total 680.0 |
+| jvm_classes_loaded_classes | 无 | important | jvm累计加载的class数量 | jvm_classes_loaded_classes 5975.0 |
+| jvm_compilation_time_ms_total | {compiler="HotSpot 64-Bit Tiered Compilers",} | important | jvm耗费在编译上的时间 | jvm_compilation_time_ms_total{compiler="HotSpot 64-Bit Tiered Compilers",} 107092.0 |
+
+#### 4.4.2. 文件(File)
-#### 4.4.2. 日志(logback)
+| Metric | Tag | level | 说明 | 示例 |
+| ---------- | -------------------- | --------- | ----------------------------------- | --------------------------- |
+| file_size | name="wal/seq/unseq" | important | 当前时间wal/seq/unseq文件大小(byte) | file_size{name="wal",} 67.0 |
+| file_count | name="wal/seq/unseq" | important | 当前时间wal/seq/unseq文件个数 | file_count{name="seq",} 1.0 |
-| Metric | Tag | 说明 | 示例 |
-| -------------------- | -------------------------------------- | --------------------------------------- | --------------------------------------- |
-| logback_events_total | {level="trace/debug/info/warn/error",} | trace/debug/info/warn/error日志累计数量 | logback_events_total{level="warn",} 0.0 |
+#### 4.4.3. 日志(logback)
+
+| Metric | Tag | level | 说明 | 示例 |
+| -------------------- | -------------------------------------- | --------- | --------------------------------------- | --------------------------------------- |
+| logback_events_total | {level="trace/debug/info/warn/error",} | important | trace/debug/info/warn/error日志累计数量 | logback_events_total{level="warn",} 0.0 |
+
+#### 4.4.4. 进程(Process)
+| Metric | Tag | level | 说明 | 示例 |
+| --------------------- | -------------- | ----- | ---------------------------------- | ----------------------------------------------- |
+| process_cpu_load | name="cpu" | core | process当前CPU占用率(%) | process_cpu_load{name="process",} 5.0 |
+| process_cpu_time | name="cpu" | core | process累计占用CPU时间(ns) | process_cpu_time{name="process",} 3.265625E9 |
+| process_max_mem | name="memory" | core | JVM最大可用内存 | process_max_mem{name="process",} 3.545759744E9 |
+| process_used_mem | name="memory" | core | JVM当前使用内存 | process_used_mem{name="process",} 4.6065456E7 |
+| process_total_mem | name="memory" | core | JVM当前已申请内存 | process_total_mem{name="process",} 2.39599616E8 |
+| process_free_mem | name="memory" | core | JVM当前剩余可用内存 | process_free_mem{name="process",} 1.94035584E8 |
+| process_mem_ratio | name="memory" | core | 进程的内存占用比例 | process_mem_ratio{name="process",} 0.0 |
+| process_threads_count | name="process" | core | 当前线程数 | process_threads_count{name="process",} 11.0 |
+| process_status | name="process" | core | 进程存活状态,1.0为存活,0.0为终止 | process_status{name="process",} 1.0 |
+
+#### 4.4.5. 系统(System)
+| Metric | Tag | level | 说明 | 示例 |
+| ------------------------------ | ------------- | --------- | ------------------------------------------ | -------------------------------------------------------------- |
+| sys_cpu_load | name="cpu" | core | system当前CPU占用率(%) | sys_cpu_load{name="system",} 15.0 |
+| sys_cpu_cores | name="cpu" | core | jvm可用处理器数 | sys_cpu_cores{name="system",} 16.0 |
+| sys_total_physical_memory_size | name="memory" | core | system最大物理内存 | sys_total_physical_memory_size{name="system",} 1.5950999552E10 |
+| sys_free_physical_memory_size | name="memory" | core | system当前剩余可用内存 | sys_free_physical_memory_size{name="system",} 4.532396032E9 |
+| sys_total_swap_space_size | name="memory" | core | system交换区最大空间 | sys_total_swap_space_size{name="system",} 2.1051273216E10 |
+| sys_free_swap_space_size | name="memory" | core | system交换区剩余可用空间 | sys_free_swap_space_size{name="system",} 2.931576832E9 |
+| sys_committed_vm_size | name="memory" | important | system保证可用于正在运行的进程的虚拟内存量 | sys_committed_vm_size{name="system",} 5.04344576E8 |
+| sys_disk_total_space | name="disk" | core | 磁盘总大小 | sys_disk_total_space{name="system",} 5.10770798592E11 |
+| sys_disk_free_space | name="disk" | core | 磁盘可用大小 | sys_disk_free_space{name="system",} 3.63467845632E11 |
### 4.5. 自定义添加埋点
+
- 如果想自己在IoTDB中添加更多Metrics埋点,可以参考[IoTDB Metrics Framework](https://github.com/apache/iotdb/tree/master/metrics)使用说明
- Metric 埋点定义规则
- - `Metric`:监控项的名称,比如`entry_seconds_count`为接口累计访问次数,file_size 为文件总数。
- - `Tags`:Key-Value对,用来明确被监控项,可选项
- - `name = xxx`:被监控项的名称,比如对`entry_seconds_count`这个监控项,name 的含义是被监控的接口名称。
- - `status = xxx`:被监控项的状态细分,比如监控 Task 的监控项可以通过该参数,将运行的 Task 和停止的 Task 分开。
- - `user = xxx`:被监控项和某个特定用户相关,比如统计root用户的写入总次数。
- - 根据具体情况自定义......
+ - `Metric`:监控项的名称,比如`entry_seconds_count`为接口累计访问次数,file_size 为文件总数。
+ - `Tags`:Key-Value对,用来明确被监控项,可选项
+ - `name = xxx`:被监控项的名称,比如对`entry_seconds_count`这个监控项,name 的含义是被监控的接口名称。
+ - `status = xxx`:被监控项的状态细分,比如监控 Task 的监控项可以通过该参数,将运行的 Task 和停止的 Task 分开。
+ - `user = xxx`:被监控项和某个特定用户相关,比如统计root用户的写入总次数。
+ - 根据具体情况自定义......
- 监控指标级别含义:
- - 线上运行默认启动级别为`Important`级,线下调试默认启动级别为`Normal`级,审核严格程度`Core > Important > Normal > All`
- - `Core`:系统的核心指标,供**运维人员**使用,关乎系统的**性能、稳定性、安全性**,比如实例的状况,系统的负载等。
- - `Important`:模块的重要指标,供**运维和测试人员**使用,直接关乎**每个模块的运行状态**,比如合并文件个数、执行情况等。
- - `Normal`:模块的一般指标,供**开发人员**使用,方便在出现问题时**定位模块**,比如合并中的特定关键操作情况。
- - `All`:模块的全部指标,供**模块开发人员**使用,往往在复现问题的时候使用,从而快速解决问题。
+ - 线上运行默认启动级别为`Important`级,线下调试默认启动级别为`Normal`级,审核严格程度`Core > Important > Normal > All`
+ - `Core`:系统的核心指标,供**运维人员**使用,关乎系统的**性能、稳定性、安全性**,比如实例的状况,系统的负载等。
+ - `Important`:模块的重要指标,供**运维和测试人员**使用,直接关乎**每个模块的运行状态**,比如合并文件个数、执行情况等。
+ - `Normal`:模块的一般指标,供**开发人员**使用,方便在出现问题时**定位模块**,比如合并中的特定关键操作情况。
+ - `All`:模块的全部指标,供**模块开发人员**使用,往往在复现问题的时候使用,从而快速解决问题。
## 5. 怎样获取这些metrics?
@@ -213,6 +233,9 @@ metric采集默认是关闭的,需要先到conf/iotdb-metric.yml中打开后
# 是否启动监控模块,默认为false
enableMetric: false
+# 是否启用操作延迟统计
+enablePerformanceStat: false
+
# 数据提供方式,对外部通过jmx和prometheus协议提供metrics的数据, 可选参数:[JMX, PROMETHEUS, IOTDB],IOTDB是默认关闭的。
metricReporterList:
- JMX
@@ -224,18 +247,26 @@ monitorType: MICROMETER
# 初始化metric的级别,可选参数: [CORE, IMPORTANT, NORMAL, ALL]
metricLevel: IMPORTANT
-# 预定义的指标集, 可选参数: [JVM, LOGBACK], 其中LOGBACK在dropwizard中不支持
+# 预定义的指标集, 可选参数: [JVM, LOGBACK, FILE, PROCESS, SYSTEM]
predefinedMetrics:
- JVM
-
-# 数据推送时间,该参数只对 IoTDB Reporter 生效
-pushPeriodInSecond: 5
+ - FILE
# Prometheus Reporter 使用的端口
-prometheusExporterPort: 9091
+prometheusExporterPort: 9091
+
+# IoTDB Reporter相关的配置
+ioTDBReporterConfig:
+ host: 127.0.0.1
+ port: 6667
+ username: root
+ password: root
+ database: _metric
+ pushPeriodInSecond: 15
```
然后按照下面的操作获取metrics数据
+
1. 打开配置文件中的metric开关
2. 其他参数使用默认配置即可
3. 启动IoTDB
@@ -303,8 +334,8 @@ metrics_path: /metrics
scheme: http
follow_redirects: true
static_configs:
-- targets:
- - localhost:9091
+ - targets:
+ - localhost:9091
```
更多细节可以参考下面的文档:
@@ -317,8 +348,14 @@ static_configs:
[Grafana从Prometheus查询数据并绘图的文档](https://prometheus.io/docs/visualization/grafana/#grafana-support-for-prometheus)
-最后是IoTDB的metrics数据在Grafana中显示的效果图:
+### 5.3. Apache IoTDB Dashboard
+我们提供了Apache IoTDB Dashboard,在Grafana中显示的效果图如下所示:
+
+
+
+Apache IoTDB Dashboard的获取方式:
-
+1. 您可以在grafana-metrics-example文件夹下获取到对应不同iotdb版本的Dashboard的json文件。
+2. 您可以访问[Grafana Dashboard官网](https://grafana.com/grafana/dashboards/)搜索`Apache IoTDB Dashboard`并使用
-
\ No newline at end of file
+在创建Grafana时,您可以选择Import刚刚下载的json文件,并为Apache IoTDB Dashboard选择对应目标数据源。
diff --git a/docs/zh/UserGuide/Process-Data/Alerting.md b/docs/zh/UserGuide/Process-Data/Alerting.md
index acc07fb74b57e..e757d5321d0cb 100644
--- a/docs/zh/UserGuide/Process-Data/Alerting.md
+++ b/docs/zh/UserGuide/Process-Data/Alerting.md
@@ -348,7 +348,7 @@ public class AlertingExample implements Trigger {
类定义的触发器。
``` sql
- CREATE TRIGGER root-ln-wf01-wt01-alert
+ CREATE TRIGGER `root-ln-wf01-wt01-alert`
AFTER INSERT
ON root.ln.wf01.wt01.temperature
AS "org.apache.iotdb.trigger.AlertingExample"
diff --git a/docs/zh/UserGuide/Process-Data/Select-Into.md b/docs/zh/UserGuide/Process-Data/Select-Into.md
index b1ca5a8ccb59c..6f9afc64e6d78 100644
--- a/docs/zh/UserGuide/Process-Data/Select-Into.md
+++ b/docs/zh/UserGuide/Process-Data/Select-Into.md
@@ -236,12 +236,12 @@ intoPath
### 其他限制
* `select`子句中的源序列和`into`子句中的目标序列数量必须相同
-* `select`子句不支持带 `*` 查询
-* `into`子句中的目标序列不必预先创建(可使用自动创建schema功能)
-* 当`into`子句中的目标序列已存在时,您需要保证`select`子句中的源序列和`into`子句中的目标序列的数据类型一致
+* `select`子句不支持带 `*`/`**` 查询
+* `into`子句中的目标序列不必预先创建(可使用自动创建schema功能),但是当`into`子句中的目标序列已存在时,您需要保证`select`子句中的源序列和`into`子句中的目标序列的数据类型一致
* `into`子句中的目标序列必须是互不相同的
* `from`子句只允许有一列序列前缀
-* 由于时间序列生成函数查询(UDF查询)/ 数学表达式查询 / 嵌套查询 尚不支持对齐时间序列(Aligned Timeseries),所以如果您在`select`子句中使用了上述查询,并且对应操作数包含对齐时间序列,会提示错误。
+* `from`子句不支持带 `*`/`**`
+* 由于时间序列生成函数查询(UDF查询)/ 数学表达式查询 / 嵌套查询 尚不支持对齐时间序列(Aligned Timeseries),所以如果您在`select`子句中使用了上述查询,并且对应操作数包含对齐时间序列,会提示错误
diff --git a/docs/zh/UserGuide/QuickStart/WayToGetIoTDB.md b/docs/zh/UserGuide/QuickStart/WayToGetIoTDB.md
index 6f7f7dd6d4b36..984dfc7b85152 100644
--- a/docs/zh/UserGuide/QuickStart/WayToGetIoTDB.md
+++ b/docs/zh/UserGuide/QuickStart/WayToGetIoTDB.md
@@ -51,7 +51,13 @@ Shell > uzip iotdb-.zip
您可以获取已发布的源码 [https://iotdb.apache.org/Download/](https://iotdb.apache.org/Download/) ,或者从 [https://github.com/apache/iotdb/tree/master](https://github.com/apache/iotdb/tree/master) git 仓库获取
-源码克隆后,进入到源码文件夹目录下,使用以下命令进行编译:
+源码克隆后,进入到源码文件夹目录下。如果您想编译已经发布过的版本,可以先用`git checkout -b my_{project.version} v{project.version}`命令新建并切换分支。比如您要编译0.12.4这个版本,您可以用如下命令去切换分支:
+
+```shell
+> git checkout -b my_0.12.4 v0.12.4
+```
+
+切换分支之后就可以使用以下命令进行编译:
```
> mvn clean package -pl server -am -Dmaven.test.skip=true
@@ -69,6 +75,14 @@ Shell > uzip iotdb-.zip
+- tools/ <-- system tools
```
+如果您想要编译项目中的某个模块,您可以在源码文件夹中使用`mvn clean package -pl {module.name} -am -DskipTests`命令进行编译。如果您需要的是带依赖的 jar 包,您可以在编译命令后面加上`-P get-jar-with-dependencies`参数。比如您想编译带依赖的 jdbc jar 包,您就可以使用以下命令进行编译:
+
+```shell
+> mvn clean package -pl jdbc -am -DskipTests -P get-jar-with-dependencies
+```
+
+编译完成后就可以在`{module.name}/target`目录中找到需要的包了。
+
### 通过 Docker 安装 (Dockerfile)
Apache IoTDB 的 Docker 镜像已经上传至 [https://hub.docker.com/r/apache/iotdb](https://hub.docker.com/r/apache/iotdb),
diff --git a/example/client-cpp-example/pom.xml b/example/client-cpp-example/pom.xml
index b2dcccd5afb9b..98b2911955399 100644
--- a/example/client-cpp-example/pom.xml
+++ b/example/client-cpp-example/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-examples
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlclient-cpp-example
diff --git a/example/flink/pom.xml b/example/flink/pom.xml
index 05b7fb94caec1..e1cdae671cc4f 100644
--- a/example/flink/pom.xml
+++ b/example/flink/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-examples
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlflink-example
diff --git a/example/hadoop/pom.xml b/example/hadoop/pom.xml
index 7b642e60e007b..0f3b2689094f1 100644
--- a/example/hadoop/pom.xml
+++ b/example/hadoop/pom.xml
@@ -24,7 +24,7 @@
iotdb-examplesorg.apache.iotdb
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlhadoop-example
diff --git a/example/jdbc/pom.xml b/example/jdbc/pom.xml
index aaaf718adb022..6aac6c720fc16 100644
--- a/example/jdbc/pom.xml
+++ b/example/jdbc/pom.xml
@@ -23,7 +23,7 @@
iotdb-examplesorg.apache.iotdb
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT4.0.0jdbc-example
diff --git a/example/kafka/pom.xml b/example/kafka/pom.xml
index 1f421180df426..169c3baf4f8a4 100644
--- a/example/kafka/pom.xml
+++ b/example/kafka/pom.xml
@@ -29,7 +29,7 @@
org.apache.iotdbiotdb-examples
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlkafka-example
diff --git a/example/mqtt-customize/pom.xml b/example/mqtt-customize/pom.xml
index c524fb0ad853c..28a41ccfe88f7 100644
--- a/example/mqtt-customize/pom.xml
+++ b/example/mqtt-customize/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-examples
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlcustomize-mqtt-example
diff --git a/example/mqtt/pom.xml b/example/mqtt/pom.xml
index a07fad758fecc..391abddaa2f57 100644
--- a/example/mqtt/pom.xml
+++ b/example/mqtt/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-examples
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlmqtt-example
diff --git a/example/pom.xml b/example/pom.xml
index f727f2c53e71d..e099ceeb1fdc0 100644
--- a/example/pom.xml
+++ b/example/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlpom
diff --git a/example/pulsar/pom.xml b/example/pulsar/pom.xml
index 72967f76e5109..30a25a48a6998 100644
--- a/example/pulsar/pom.xml
+++ b/example/pulsar/pom.xml
@@ -23,7 +23,7 @@
iotdb-examplesorg.apache.iotdb
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xml4.0.0
diff --git a/example/rabbitmq/pom.xml b/example/rabbitmq/pom.xml
index 3737c63c93cbc..5a2a95bbeed74 100644
--- a/example/rabbitmq/pom.xml
+++ b/example/rabbitmq/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-examples
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlrabbitmq-example
diff --git a/example/rocketmq/pom.xml b/example/rocketmq/pom.xml
index d3cb1192c90a1..b81a04a8f3b44 100644
--- a/example/rocketmq/pom.xml
+++ b/example/rocketmq/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-examples
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlrocketmq-example
diff --git a/example/session/pom.xml b/example/session/pom.xml
index 18de9bdc8539d..2654c8c8388c8 100644
--- a/example/session/pom.xml
+++ b/example/session/pom.xml
@@ -23,7 +23,7 @@
iotdb-examplesorg.apache.iotdb
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT4.0.0client-example
diff --git a/example/session/src/main/java/org/apache/iotdb/SessionExample.java b/example/session/src/main/java/org/apache/iotdb/SessionExample.java
index b49f883bca723..b86a4cbcf793c 100644
--- a/example/session/src/main/java/org/apache/iotdb/SessionExample.java
+++ b/example/session/src/main/java/org/apache/iotdb/SessionExample.java
@@ -451,10 +451,7 @@ private static void insertTabletWithNullValues()
Tablet tablet = new Tablet(ROOT_SG1_D1, schemaList, 100);
// Method 1 to add tablet data
- tablet.bitMaps = new BitMap[schemaList.size()];
- for (int s = 0; s < 3; s++) {
- tablet.bitMaps[s] = new BitMap(tablet.getMaxRowNumber());
- }
+ tablet.initBitMaps();
long timestamp = System.currentTimeMillis();
for (long row = 0; row < 100; row++) {
diff --git a/example/session/src/main/java/org/apache/iotdb/SessionPoolExample.java b/example/session/src/main/java/org/apache/iotdb/SessionPoolExample.java
index 230849d25d1e7..23a1895c22569 100644
--- a/example/session/src/main/java/org/apache/iotdb/SessionPoolExample.java
+++ b/example/session/src/main/java/org/apache/iotdb/SessionPoolExample.java
@@ -32,12 +32,12 @@
public class SessionPoolExample {
- private static SessionPool pool;
+ private static SessionPool sessionPool;
private static ExecutorService service;
- public static void main(String[] args)
- throws StatementExecutionException, IoTDBConnectionException, InterruptedException {
- pool =
+ /** Build a custom SessionPool for this example */
+ private static void constructCustomSessionPool() {
+ sessionPool =
new SessionPool.Builder()
.host("127.0.0.1")
.port(6667)
@@ -45,13 +45,33 @@ public static void main(String[] args)
.password("root")
.maxSize(3)
.build();
- service = Executors.newFixedThreadPool(10);
+ }
+ /** Build a redirect-able SessionPool for this example */
+ private static void constructRedirectSessionPool() {
+ List nodeUrls = new ArrayList<>();
+ nodeUrls.add("127.0.0.1:6667");
+ nodeUrls.add("127.0.0.1:6668");
+ sessionPool =
+ new SessionPool.Builder()
+ .nodeUrls(nodeUrls)
+ .user("root")
+ .password("root")
+ .maxSize(3)
+ .build();
+ }
+
+ public static void main(String[] args)
+ throws StatementExecutionException, IoTDBConnectionException, InterruptedException {
+ // Choose the SessionPool you are going to use
+ constructRedirectSessionPool();
+
+ service = Executors.newFixedThreadPool(10);
insertRecord();
queryByRowRecord();
Thread.sleep(1000);
queryByIterator();
- pool.close();
+ sessionPool.close();
service.shutdown();
}
@@ -72,7 +92,7 @@ private static void insertRecord() throws StatementExecutionException, IoTDBConn
values.add(1L);
values.add(2L);
values.add(3L);
- pool.insertRecord(deviceId, time, measurements, types, values);
+ sessionPool.insertRecord(deviceId, time, measurements, types, values);
}
}
@@ -82,7 +102,7 @@ private static void queryByRowRecord() {
() -> {
SessionDataSetWrapper wrapper = null;
try {
- wrapper = pool.executeQueryStatement("select * from root.sg1.d1");
+ wrapper = sessionPool.executeQueryStatement("select * from root.sg1.d1");
System.out.println(wrapper.getColumnNames());
System.out.println(wrapper.getColumnTypes());
while (wrapper.hasNext()) {
@@ -92,7 +112,7 @@ private static void queryByRowRecord() {
e.printStackTrace();
} finally {
// remember to close data set finally!
- pool.closeResultSet(wrapper);
+ sessionPool.closeResultSet(wrapper);
}
});
}
@@ -104,7 +124,7 @@ private static void queryByIterator() {
() -> {
SessionDataSetWrapper wrapper = null;
try {
- wrapper = pool.executeQueryStatement("select * from root.sg1.d1");
+ wrapper = sessionPool.executeQueryStatement("select * from root.sg1.d1");
// get DataIterator like JDBC
DataIterator dataIterator = wrapper.iterator();
System.out.println(wrapper.getColumnNames());
@@ -120,7 +140,7 @@ private static void queryByIterator() {
e.printStackTrace();
} finally {
// remember to close data set finally!
- pool.closeResultSet(wrapper);
+ sessionPool.closeResultSet(wrapper);
}
});
}
diff --git a/example/trigger/pom.xml b/example/trigger/pom.xml
index 90affe003a7fc..32b23f9fd6922 100644
--- a/example/trigger/pom.xml
+++ b/example/trigger/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-examples
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmltrigger-example
@@ -32,7 +32,7 @@
org.apache.iotdbiotdb-server
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOTprovided
diff --git a/example/tsfile/pom.xml b/example/tsfile/pom.xml
index 13a53a9010804..12057aae987a2 100644
--- a/example/tsfile/pom.xml
+++ b/example/tsfile/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-examples
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmltsfile-example
diff --git a/example/udf/pom.xml b/example/udf/pom.xml
index 6c4b7ea404a6b..1d6751618437b 100644
--- a/example/udf/pom.xml
+++ b/example/udf/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-examples
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmludf-example
diff --git a/flink-iotdb-connector/pom.xml b/flink-iotdb-connector/pom.xml
index 02e8d119e7e22..e5ac4aa792cae 100644
--- a/flink-iotdb-connector/pom.xml
+++ b/flink-iotdb-connector/pom.xml
@@ -20,7 +20,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlflink-iotdb-connector
diff --git a/flink-tsfile-connector/pom.xml b/flink-tsfile-connector/pom.xml
index aee76b093a0cb..6dc430d996314 100644
--- a/flink-tsfile-connector/pom.xml
+++ b/flink-tsfile-connector/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlflink-tsfile-connector
diff --git a/flink-tsfile-connector/src/test/java/org/apache/iotdb/flink/tsfile/RowTsFileInputFormatIT.java b/flink-tsfile-connector/src/test/java/org/apache/iotdb/flink/tsfile/RowTsFileInputFormatIT.java
index 2fbee5431e95d..0c36cb61bdcc4 100644
--- a/flink-tsfile-connector/src/test/java/org/apache/iotdb/flink/tsfile/RowTsFileInputFormatIT.java
+++ b/flink-tsfile-connector/src/test/java/org/apache/iotdb/flink/tsfile/RowTsFileInputFormatIT.java
@@ -59,20 +59,20 @@ public void testBatchExecution() throws Exception {
List result = source.map(Row::toString).collect();
Collections.sort(result);
String[] expected = {
- "1,1.2,20,null,2.3,11,19",
- "10,null,20,50,25.4,10,21",
- "11,1.4,21,null,null,null,null",
- "12,1.2,20,51,null,null,null",
- "14,7.2,10,11,null,null,null",
- "15,6.2,20,21,null,null,null",
- "16,9.2,30,31,null,null,null",
- "2,null,20,50,25.4,10,21",
- "3,1.4,21,null,null,null,null",
- "4,1.2,20,51,null,null,null",
- "6,7.2,10,11,null,null,null",
- "7,6.2,20,21,null,null,null",
- "8,9.2,30,31,null,null,null",
- "9,1.2,20,null,2.3,11,19"
+ "+I[1, 1.2, 20, null, 2.3, 11, 19]",
+ "+I[10, null, 20, 50, 25.4, 10, 21]",
+ "+I[11, 1.4, 21, null, null, null, null]",
+ "+I[12, 1.2, 20, 51, null, null, null]",
+ "+I[14, 7.2, 10, 11, null, null, null]",
+ "+I[15, 6.2, 20, 21, null, null, null]",
+ "+I[16, 9.2, 30, 31, null, null, null]",
+ "+I[2, null, 20, 50, 25.4, 10, 21]",
+ "+I[3, 1.4, 21, null, null, null, null]",
+ "+I[4, 1.2, 20, 51, null, null, null]",
+ "+I[6, 7.2, 10, 11, null, null, null]",
+ "+I[7, 6.2, 20, 21, null, null, null]",
+ "+I[8, 9.2, 30, 31, null, null, null]",
+ "+I[9, 1.2, 20, null, 2.3, 11, 19]"
};
assertArrayEquals(expected, result.toArray());
}
@@ -88,20 +88,20 @@ public void testStreamExecution() {
.sorted()
.toArray(String[]::new);
String[] expected = {
- "1,1.2,20,null,2.3,11,19",
- "10,null,20,50,25.4,10,21",
- "11,1.4,21,null,null,null,null",
- "12,1.2,20,51,null,null,null",
- "14,7.2,10,11,null,null,null",
- "15,6.2,20,21,null,null,null",
- "16,9.2,30,31,null,null,null",
- "2,null,20,50,25.4,10,21",
- "3,1.4,21,null,null,null,null",
- "4,1.2,20,51,null,null,null",
- "6,7.2,10,11,null,null,null",
- "7,6.2,20,21,null,null,null",
- "8,9.2,30,31,null,null,null",
- "9,1.2,20,null,2.3,11,19"
+ "+I[1, 1.2, 20, null, 2.3, 11, 19]",
+ "+I[10, null, 20, 50, 25.4, 10, 21]",
+ "+I[11, 1.4, 21, null, null, null, null]",
+ "+I[12, 1.2, 20, 51, null, null, null]",
+ "+I[14, 7.2, 10, 11, null, null, null]",
+ "+I[15, 6.2, 20, 21, null, null, null]",
+ "+I[16, 9.2, 30, 31, null, null, null]",
+ "+I[2, null, 20, 50, 25.4, 10, 21]",
+ "+I[3, 1.4, 21, null, null, null, null]",
+ "+I[4, 1.2, 20, 51, null, null, null]",
+ "+I[6, 7.2, 10, 11, null, null, null]",
+ "+I[7, 6.2, 20, 21, null, null, null]",
+ "+I[8, 9.2, 30, 31, null, null, null]",
+ "+I[9, 1.2, 20, null, 2.3, 11, 19]"
};
assertArrayEquals(expected, result);
}
diff --git a/grafana-connector/pom.xml b/grafana-connector/pom.xml
index 717d4c6240fe8..d894ca877992b 100644
--- a/grafana-connector/pom.xml
+++ b/grafana-connector/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xml0.14.1
- 0.8
+ 0.92.10.54.8-11.3.1
@@ -302,21 +302,6 @@
jetty-webapp${jetty.version}
-
- io.dropwizard.metrics
- metrics-core
- ${metrics.version}
-
-
- io.dropwizard.metrics
- metrics-jvm
- ${metrics.version}
-
-
- io.dropwizard.metrics
- metrics-json
- ${metrics.version}
- me.tongfeiprogressbar
@@ -683,6 +668,8 @@
.pytest_cache/**venv/**apache_iotdb.egg-info/**
+ **/iotdb/thrift/__init__.py
+ **/iotdb/thrift/rpc/__init__.py**/resources/META-INF/services/**
@@ -1054,7 +1041,7 @@
windows-x86_64
- http://artfiles.org/apache.org/thrift/${thrift.version}/thrift-${thrift.version}.exe
+ http://archive.apache.org/dist/thrift/${thrift.version}/thrift-${thrift.version}.exethrift-${thrift.version}-win-x86_64.exetrueecho
diff --git a/server/pom.xml b/server/pom.xml
index 6a83ac93e746e..fc9cc02630fc1 100644
--- a/server/pom.xml
+++ b/server/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmliotdb-server
@@ -45,6 +45,11 @@
iotdb-antlr${project.version}
+
+ org.apache.iotdb
+ iotdb-session
+ ${project.version}
+ org.apache.iotdbtsfile
@@ -105,18 +110,6 @@
openapi${project.version}
-
- io.dropwizard.metrics
- metrics-core
-
-
- io.dropwizard.metrics
- metrics-jvm
-
-
- io.dropwizard.metrics
- metrics-json
- commons-clicommons-cli
@@ -138,6 +131,10 @@
com.zaxxerHikariCP
+
+ io.dropwizard.metrics
+ metrics-jvm
+
@@ -226,6 +223,7 @@
junitjunit
+ testorg.apache.iotdb
diff --git a/server/src/assembly/resources/conf/iotdb-engine.properties b/server/src/assembly/resources/conf/iotdb-engine.properties
index a4a02484ee56e..afa416c12903d 100644
--- a/server/src/assembly/resources/conf/iotdb-engine.properties
+++ b/server/src/assembly/resources/conf/iotdb-engine.properties
@@ -505,6 +505,12 @@ timestamp_precision=ms
# Datatype: int
# query_timeout_threshold=60000
+# The number of sub compaction threads to be set up to perform compaction.
+# Currently only works for nonAligned data in cross space compaction and unseq inner space compaction.
+# Set to 1 when less than or equal to 0.
+# Datatype: int
+# sub_compaction_thread_num=4
+
####################
### Metadata Cache Configuration
####################
@@ -586,10 +592,6 @@ timestamp_precision=ms
### performance statistic configuration
####################
-# Is stat performance of sub-module enable
-# Datatype: boolean
-# enable_performance_stat=false
-
# Uncomment following fields to configure the tracing root directory.
# For Window platform, the index is as follows:
# tracing_dir=data\\tracing
@@ -925,4 +927,4 @@ timestamp_precision=ms
### Group By Fill Configuration
####################
# Datatype: float
-# group_by_fill_cache_size_in_mb=1.0
+# group_by_fill_cache_size_in_mb=1.0
\ No newline at end of file
diff --git a/server/src/assembly/resources/sbin/stop-server.bat b/server/src/assembly/resources/sbin/stop-server.bat
index b1dbf7cf83b47..a2f85581fae7d 100755
--- a/server/src/assembly/resources/sbin/stop-server.bat
+++ b/server/src/assembly/resources/sbin/stop-server.bat
@@ -22,7 +22,7 @@
set current_dir=%~dp0
set superior_dir=%current_dir%\..\
-for /f "eol=; tokens=2,2 delims==" %%i in ('findstr /i "rpc_port"
+for /f "eol=; tokens=2,2 delims==" %%i in ('findstr /i "^rpc_port"
%superior_dir%\conf\iotdb-engine.properties') do (
set rpc_port=%%i
)
diff --git a/server/src/assembly/resources/sbin/stop-server.sh b/server/src/assembly/resources/sbin/stop-server.sh
index 5fb53f52ce5d4..0cd069a8fb58a 100755
--- a/server/src/assembly/resources/sbin/stop-server.sh
+++ b/server/src/assembly/resources/sbin/stop-server.sh
@@ -21,11 +21,18 @@
IOTDB_CONF="`dirname "$0"`/../conf"
rpc_port=`sed '/^rpc_port=/!d;s/.*=//' ${IOTDB_CONF}/iotdb-engine.properties`
-if type lsof > /dev/null; then
- PID=$(lsof -t -i:${rpc_port})
+
+if type lsof > /dev/null 2>&1 ; then
+ PID=$(lsof -t -i:${rpc_port} -sTCP:LISTEN)
+elif type netstat > /dev/null 2>&1 ; then
+ PID=$(netstat -anp 2>/dev/null | grep ":${rpc_port} " | grep ' LISTEN ' | awk '{print $NF}' | sed "s|/.*||g" )
else
- PID=$(ps ax | grep -i 'IoTDB' | grep java | grep -v grep | awk '{print $1}')
+ echo ""
+ echo " Error: No necessary tool."
+ echo " Please install 'lsof' or 'netstat'."
+ exit 1
fi
+
if [ -z "$PID" ]; then
echo "No IoTDB server to stop"
exit 1
diff --git a/server/src/assembly/resources/tools/tsfileToolSet/validate-tsfile.bat b/server/src/assembly/resources/tools/tsfileToolSet/validate-tsfile.bat
new file mode 100644
index 0000000000000..dcd22f0c2af9a
--- /dev/null
+++ b/server/src/assembly/resources/tools/tsfileToolSet/validate-tsfile.bat
@@ -0,0 +1,62 @@
+@REM
+@REM Licensed to the Apache Software Foundation (ASF) under one
+@REM or more contributor license agreements. See the NOTICE file
+@REM distributed with this work for additional information
+@REM regarding copyright ownership. The ASF licenses this file
+@REM to you under the Apache License, Version 2.0 (the
+@REM "License"); you may not use this file except in compliance
+@REM with the License. You may obtain a copy of the License at
+@REM
+@REM http://www.apache.org/licenses/LICENSE-2.0
+@REM
+@REM Unless required by applicable law or agreed to in writing,
+@REM software distributed under the License is distributed on an
+@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+@REM KIND, either express or implied. See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM
+
+
+@echo off
+echo ````````````````````````
+echo Starting Validating the TsFile
+echo ````````````````````````
+
+if "%OS%" == "Windows_NT" setlocal
+
+pushd %~dp0..\..
+if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%CD%
+popd
+
+if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.db.tools.validate.TsFileValidationTool
+if NOT DEFINED JAVA_HOME goto :err
+
+@REM -----------------------------------------------------------------------------
+@REM ***** CLASSPATH library setting *****
+@REM Ensure that any user defined CLASSPATH variables are not used on startup
+set CLASSPATH="%IOTDB_HOME%\lib\*"
+
+goto okClasspath
+
+:append
+set CLASSPATH=%CLASSPATH%;%1
+goto :eof
+
+@REM -----------------------------------------------------------------------------
+:okClasspath
+
+"%JAVA_HOME%\bin\java" -cp "%CLASSPATH%" %MAIN_CLASS% %*
+
+goto finally
+
+
+:err
+echo JAVA_HOME environment variable must be set!
+pause
+
+
+@REM -----------------------------------------------------------------------------
+:finally
+
+ENDLOCAL
diff --git a/server/src/assembly/resources/tools/tsfileToolSet/validate-tsfile.sh b/server/src/assembly/resources/tools/tsfileToolSet/validate-tsfile.sh
new file mode 100644
index 0000000000000..b32b4021dca42
--- /dev/null
+++ b/server/src/assembly/resources/tools/tsfileToolSet/validate-tsfile.sh
@@ -0,0 +1,48 @@
+#!/bin/sh
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+echo ---------------------
+echo Starting Validating the TsFile
+echo ---------------------
+
+if [ -z "${IOTDB_HOME}" ]; then
+ export IOTDB_HOME="$(cd "`dirname "$0"`"/../..; pwd)"
+fi
+
+if [ -n "$JAVA_HOME" ]; then
+ for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do
+ if [ -x "$java" ]; then
+ JAVA="$java"
+ break
+ fi
+ done
+else
+ JAVA=java
+fi
+
+CLASSPATH=""
+for f in ${IOTDB_HOME}/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}":"$f
+done
+
+MAIN_CLASS=org.apache.iotdb.db.tools.validate.TsFileValidationTool
+
+"$JAVA" -cp "$CLASSPATH" "$MAIN_CLASS" "$@"
+exit $?
diff --git a/server/src/main/java/org/apache/iotdb/db/auth/AuthorityChecker.java b/server/src/main/java/org/apache/iotdb/db/auth/AuthorityChecker.java
index 43b56826dda81..6a144118bc4b3 100644
--- a/server/src/main/java/org/apache/iotdb/db/auth/AuthorityChecker.java
+++ b/server/src/main/java/org/apache/iotdb/db/auth/AuthorityChecker.java
@@ -120,6 +120,7 @@ private static int translateToPermissionId(Operator.OperatorType type) {
case SET_STORAGE_GROUP:
return PrivilegeType.SET_STORAGE_GROUP.ordinal();
case CREATE_TIMESERIES:
+ case CREATE_ALIGNED_TIMESERIES:
return PrivilegeType.CREATE_TIMESERIES.ordinal();
case DELETE_TIMESERIES:
case DELETE:
@@ -141,6 +142,9 @@ private static int translateToPermissionId(Operator.OperatorType type) {
case LOAD_DATA:
case CREATE_INDEX:
case BATCH_INSERT:
+ case BATCH_INSERT_ONE_DEVICE:
+ case BATCH_INSERT_ROWS:
+ case MULTI_BATCH_INSERT:
return PrivilegeType.INSERT_TIMESERIES.ordinal();
case LIST_ROLE:
case LIST_ROLE_USERS:
diff --git a/server/src/main/java/org/apache/iotdb/db/concurrent/ThreadName.java b/server/src/main/java/org/apache/iotdb/db/concurrent/ThreadName.java
index 18ab028afcc55..e94b5af126950 100644
--- a/server/src/main/java/org/apache/iotdb/db/concurrent/ThreadName.java
+++ b/server/src/main/java/org/apache/iotdb/db/concurrent/ThreadName.java
@@ -37,6 +37,7 @@ public enum ThreadName {
FLUSH_SERVICE("Flush"),
FLUSH_SUB_TASK_SERVICE("Flush-SubTask"),
COMPACTION_SERVICE("Compaction"),
+ COMPACTION_SUB_SERVICE("Sub-Compaction"),
COMPACTION_SCHEDULE("Compaction_Schedule"),
WAL_DAEMON("WAL-Sync"),
WAL_FORCE_DAEMON("WAL-Force"),
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
index fb195d35422ca..d33e8a0ff3cc8 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
@@ -19,7 +19,7 @@
package org.apache.iotdb.db.conf;
import org.apache.iotdb.db.conf.directories.DirectoryManager;
-import org.apache.iotdb.db.engine.compaction.CompactionPriority;
+import org.apache.iotdb.db.engine.compaction.constant.CompactionPriority;
import org.apache.iotdb.db.engine.compaction.cross.CrossCompactionStrategy;
import org.apache.iotdb.db.engine.compaction.inner.InnerCompactionStrategy;
import org.apache.iotdb.db.engine.storagegroup.timeindex.TimeIndexLevel;
@@ -426,6 +426,12 @@ public class IoTDBConfig {
/** The interval of compaction task submission from queue in CompactionTaskMananger */
private long compactionSubmissionIntervalInMs = 60_000L;
+ /**
+ * The number of sub compaction threads to be set up to perform compaction. Currently only works
+ * for nonAligned data in cross space compaction and unseq inner space compaction.
+ */
+ private int subCompactionTaskNum = 4;
+
/** whether to cache meta data(ChunkMetaData and TsFileMetaData) or not. */
private boolean metaDataCacheEnable = true;
@@ -490,9 +496,6 @@ public class IoTDBConfig {
/** Replace implementation class of influxdb protocol service */
private String influxdbImplClassName = InfluxDBServiceImpl.class.getName();
- /** Is stat performance of sub-module enable. */
- private boolean enablePerformanceStat = false;
-
/** whether use chunkBufferPool. */
private boolean chunkBufferPoolEnable = false;
@@ -814,6 +817,33 @@ public class IoTDBConfig {
/** Encryption provided class parameter */
private String encryptDecryptProviderParameter;
+ // Operation Sync Config
+ private boolean enableOperationSync = false;
+
+ // Secondary IoTDB
+ private String secondaryAddress = "127.0.0.1";
+ private int secondaryPort = 6668;
+ private String secondaryUser = "root";
+ private String secondaryPassword = "root";
+
+ // The transmitting concurrency size of operation sync SessionPool
+ private int OperationSyncSessionConcurrencySize = 8;
+
+ // OperationSyncLog dir
+ private String operationSyncLogDir =
+ DEFAULT_BASE_DIR + File.separator + IoTDBConstant.OPERATION_SYNC_FOLDER_NAME;
+ // The validity of each OperationSyncLog
+ private int operationSyncLogValidity = 30;
+ // The maximum id of OperationSyncLog
+ private int operationSyncLogNum = 32767;
+ // The max size of all the OperationSyncLog. Default is 100GB
+ private long operationSyncMaxLogSize = 107374182400L;
+
+ // OperationSyncProducer DML cache size
+ private int operationSyncProducerCacheSize = 1024;
+ // OperationSyncConsumer concurrency size
+ private int operationSyncConsumerConcurrencySize = 4;
+
public IoTDBConfig() {
// empty constructor
}
@@ -947,6 +977,7 @@ private void formulateFolders() {
extDir = addHomeDir(extDir);
udfDir = addHomeDir(udfDir);
triggerDir = addHomeDir(triggerDir);
+ operationSyncLogDir = addHomeDir(operationSyncLogDir);
if (TSFileDescriptor.getInstance().getConfig().getTSFileStorageFs().equals(FSType.HDFS)) {
String hdfsDir = getHdfsDir();
@@ -1533,14 +1564,6 @@ void setExternalSortThreshold(int externalSortThreshold) {
this.externalSortThreshold = externalSortThreshold;
}
- public boolean isEnablePerformanceStat() {
- return enablePerformanceStat;
- }
-
- public void setEnablePerformanceStat(boolean enablePerformanceStat) {
- this.enablePerformanceStat = enablePerformanceStat;
- }
-
public boolean isEnablePartialInsert() {
return enablePartialInsert;
}
@@ -2512,6 +2535,14 @@ public void setCompactionSubmissionIntervalInMs(long interval) {
compactionSubmissionIntervalInMs = interval;
}
+ public int getSubCompactionTaskNum() {
+ return subCompactionTaskNum;
+ }
+
+ public void setSubCompactionTaskNum(int subCompactionTaskNum) {
+ this.subCompactionTaskNum = subCompactionTaskNum;
+ }
+
public String getDeviceIDTransformationMethod() {
return deviceIDTransformationMethod;
}
@@ -2551,4 +2582,100 @@ public String getEncryptDecryptProviderParameter() {
public void setEncryptDecryptProviderParameter(String encryptDecryptProviderParameter) {
this.encryptDecryptProviderParameter = encryptDecryptProviderParameter;
}
+
+ public boolean isEnableOperationSync() {
+ return enableOperationSync;
+ }
+
+ public void setEnableOperationSync(boolean enableOperationSync) {
+ this.enableOperationSync = enableOperationSync;
+ }
+
+ public String getSecondaryAddress() {
+ return secondaryAddress;
+ }
+
+ public void setSecondaryAddress(String secondaryAddress) {
+ this.secondaryAddress = secondaryAddress;
+ }
+
+ public int getSecondaryPort() {
+ return secondaryPort;
+ }
+
+ public void setSecondaryPort(int secondaryPort) {
+ this.secondaryPort = secondaryPort;
+ }
+
+ public String getSecondaryUser() {
+ return secondaryUser;
+ }
+
+ public void setSecondaryUser(String secondaryUser) {
+ this.secondaryUser = secondaryUser;
+ }
+
+ public String getSecondaryPassword() {
+ return secondaryPassword;
+ }
+
+ public void setSecondaryPassword(String secondaryPassword) {
+ this.secondaryPassword = secondaryPassword;
+ }
+
+ public int getOperationSyncSessionConcurrencySize() {
+ return OperationSyncSessionConcurrencySize;
+ }
+
+ public void setOperationSyncSessionConcurrencySize(int operationSyncSessionConcurrencySize) {
+ this.OperationSyncSessionConcurrencySize = operationSyncSessionConcurrencySize;
+ }
+
+ public String getOperationSyncLogDir() {
+ return operationSyncLogDir;
+ }
+
+ public void setOperationSyncLogDir(String operationSyncLogDir) {
+ this.operationSyncLogDir = operationSyncLogDir;
+ }
+
+ public int getOperationSyncLogValidity() {
+ return operationSyncLogValidity;
+ }
+
+ public void setOperationSyncLogValidity(int operationSyncLogValidity) {
+ this.operationSyncLogValidity = operationSyncLogValidity;
+ }
+
+ public int getOperationSyncLogNum() {
+ return operationSyncLogNum;
+ }
+
+ public void setOperationSyncLogNum(int operationSyncLogNum) {
+ this.operationSyncLogNum = operationSyncLogNum;
+ }
+
+ public long getOperationSyncMaxLogSize() {
+ return operationSyncMaxLogSize;
+ }
+
+ public void setOperationSyncMaxLogSize(long operationSyncMaxLogSize) {
+ this.operationSyncMaxLogSize = operationSyncMaxLogSize;
+ }
+
+ public int getOperationSyncProducerCacheSize() {
+ return operationSyncProducerCacheSize;
+ }
+
+ public void setOperationSyncProducerCacheSize(int operationSyncProducerCacheSize) {
+ this.operationSyncProducerCacheSize = operationSyncProducerCacheSize;
+ }
+
+ public int getOperationSyncConsumerConcurrencySize() {
+ return operationSyncConsumerConcurrencySize;
+ }
+
+ public void setOperationSyncConsumerConcurrencySize(int operationSyncConsumerConcurrencySize) {
+ this.operationSyncConsumerConcurrencySize = operationSyncConsumerConcurrencySize;
+ }
}
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfigCheck.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfigCheck.java
index d4492c7ceaff2..c9bc95a8031f6 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfigCheck.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfigCheck.java
@@ -256,6 +256,7 @@ private void upgradePropertiesFile() throws IOException {
properties.setProperty(k, v);
}
});
+ properties.setProperty(IOTDB_VERSION_STRING, IoTDBConstant.VERSION);
properties.store(tmpFOS, SYSTEM_PROPERTIES_STRING);
// upgrade finished, delete old system.properties file
@@ -284,7 +285,7 @@ private void upgradePropertiesFileFromBrokenFile() throws IOException {
properties.setProperty(k, v);
}
});
-
+ properties.setProperty(IOTDB_VERSION_STRING, IoTDBConstant.VERSION);
properties.store(tmpFOS, SYSTEM_PROPERTIES_STRING);
// upgrade finished, delete old system.properties file
if (propertiesFile.exists()) {
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConstant.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConstant.java
index feab1abd37ac1..21228f3cd73e6 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConstant.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConstant.java
@@ -154,6 +154,9 @@ private IoTDBConstant() {}
public static final String UDF_FOLDER_NAME = "udf";
public static final String TRIGGER_FOLDER_NAME = "trigger";
+ // Operation Sync folder name
+ public static final String OPERATION_SYNC_FOLDER_NAME = "operationsync";
+
// mqtt
public static final String ENABLE_MQTT = "enable_mqtt_service";
public static final String MQTT_HOST_NAME = "mqtt_host";
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
index 3167a54f7e05d..5ff5d12683144 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
@@ -20,7 +20,7 @@
import org.apache.iotdb.db.conf.directories.DirectoryManager;
import org.apache.iotdb.db.engine.StorageEngine;
-import org.apache.iotdb.db.engine.compaction.CompactionPriority;
+import org.apache.iotdb.db.engine.compaction.constant.CompactionPriority;
import org.apache.iotdb.db.engine.compaction.cross.CrossCompactionStrategy;
import org.apache.iotdb.db.engine.compaction.inner.InnerCompactionStrategy;
import org.apache.iotdb.db.exception.query.QueryProcessException;
@@ -361,6 +361,13 @@ private void loadProps() {
properties.getProperty(
"compaction_priority", conf.getCompactionPriority().toString())));
+ int subtaskNum =
+ Integer.parseInt(
+ properties.getProperty(
+ "sub_compaction_thread_num", Integer.toString(conf.getSubCompactionTaskNum())));
+ subtaskNum = subtaskNum <= 0 ? 1 : subtaskNum;
+ conf.setSubCompactionTaskNum(subtaskNum);
+
conf.setQueryTimeoutThreshold(
Integer.parseInt(
properties.getProperty(
@@ -559,13 +566,6 @@ private void loadProps() {
"mtree_snapshot_threshold_time",
Integer.toString(conf.getMtreeSnapshotThresholdTime()))));
- conf.setEnablePerformanceStat(
- Boolean.parseBoolean(
- properties
- .getProperty(
- "enable_performance_stat", Boolean.toString(conf.isEnablePerformanceStat()))
- .trim()));
-
int maxConcurrentClientNum =
Integer.parseInt(
properties.getProperty(
@@ -783,6 +783,62 @@ private void loadProps() {
"iotdb_server_encrypt_decrypt_provider_parameter",
conf.getEncryptDecryptProviderParameter()));
+ // set OperationSync config
+ conf.setEnableOperationSync(
+ Boolean.parseBoolean(
+ properties.getProperty(
+ "enable_operation_sync", String.valueOf(conf.isEnableOperationSync()))));
+
+ conf.setSecondaryAddress(
+ properties.getProperty("secondary_address", conf.getSecondaryAddress()));
+
+ conf.setSecondaryPort(
+ Integer.parseInt(
+ properties.getProperty("secondary_port", String.valueOf(conf.getSecondaryPort()))));
+
+ conf.setSecondaryUser(properties.getProperty("secondary_user", conf.getSecondaryUser()));
+
+ conf.setSecondaryPassword(
+ properties.getProperty("secondary_password", conf.getSecondaryPassword()));
+
+ conf.setOperationSyncSessionConcurrencySize(
+ Integer.parseInt(
+ properties.getProperty(
+ "operation_sync_session_concurrency_size",
+ String.valueOf(conf.getOperationSyncSessionConcurrencySize()))));
+
+ conf.setOperationSyncLogDir(
+ properties.getProperty("operation_sync_log_dir", conf.getOperationSyncLogDir()));
+
+ conf.setOperationSyncLogValidity(
+ Integer.parseInt(
+ properties.getProperty(
+ "operation_sync_log_file_validity",
+ String.valueOf(conf.getOperationSyncLogValidity()))));
+
+ conf.setOperationSyncLogNum(
+ Integer.parseInt(
+ properties.getProperty(
+ "operation_sync_log_file_num", String.valueOf(conf.getOperationSyncLogNum()))));
+
+ conf.setOperationSyncMaxLogSize(
+ Long.parseLong(
+ properties.getProperty(
+ "operation_sync_max_log_size",
+ String.valueOf(conf.getOperationSyncMaxLogSize()))));
+
+ conf.setOperationSyncProducerCacheSize(
+ Integer.parseInt(
+ properties.getProperty(
+ "operation_sync_producer_cache_size",
+ String.valueOf(conf.getOperationSyncProducerCacheSize()))));
+
+ conf.setOperationSyncConsumerConcurrencySize(
+ Integer.parseInt(
+ properties.getProperty(
+ "operation_sync_consumer_concurrency_size",
+ String.valueOf(conf.getOperationSyncConsumerConcurrencySize()))));
+
// At the same time, set TSFileConfig
TSFileDescriptor.getInstance()
.getConfig()
diff --git a/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncConsumer.java b/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncConsumer.java
new file mode 100644
index 0000000000000..3f05766d532e7
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncConsumer.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.doublelive;
+
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.session.pool.SessionPool;
+import org.apache.iotdb.tsfile.utils.Pair;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.concurrent.BlockingQueue;
+
+public class OperationSyncConsumer implements Runnable {
+ private static final Logger LOGGER = LoggerFactory.getLogger(OperationSyncConsumer.class);
+
+ private final BlockingQueue>
+ OperationSyncQueue;
+ private final SessionPool operationSyncSessionPool;
+ private final OperationSyncLogService dmlLogService;
+
+ public OperationSyncConsumer(
+ BlockingQueue>
+ OperationSyncQueue,
+ SessionPool operationSyncSessionPool,
+ OperationSyncLogService dmlLogService) {
+ this.OperationSyncQueue = OperationSyncQueue;
+ this.operationSyncSessionPool = operationSyncSessionPool;
+ this.dmlLogService = dmlLogService;
+ }
+
+ @Override
+ public void run() {
+ while (true) {
+ Pair head;
+ ByteBuffer headBuffer;
+ OperationSyncPlanTypeUtils.OperationSyncPlanType headType;
+ try {
+ head = OperationSyncQueue.take();
+ headBuffer = head.left;
+ headType = head.right;
+ } catch (InterruptedException e) {
+ LOGGER.error("OperationSyncConsumer been interrupted: ", e);
+ continue;
+ }
+
+ headBuffer.position(0);
+ boolean transmitStatus = false;
+ try {
+ headBuffer.position(0);
+ transmitStatus = operationSyncSessionPool.operationSyncTransmit(headBuffer);
+ } catch (IoTDBConnectionException connectionException) {
+ // warn IoTDBConnectionException and do serialization
+ LOGGER.warn(
+ "OperationSyncConsumer can't transmit because network failure", connectionException);
+ } catch (Exception e) {
+ // The PhysicalPlan has internal error, reject transmit
+ LOGGER.error("OperationSyncConsumer can't transmit", e);
+ continue;
+ }
+
+ if (!transmitStatus) {
+ try {
+ // must set buffer position to limit() before serialization
+ headBuffer.position(headBuffer.limit());
+ dmlLogService.acquireLogWriter();
+ dmlLogService.write(headBuffer);
+ } catch (IOException e) {
+ LOGGER.error("OperationSyncConsumer can't serialize physicalPlan", e);
+ } finally {
+ dmlLogService.releaseLogWriter();
+ }
+ }
+ }
+ }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncDDLProtector.java b/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncDDLProtector.java
new file mode 100644
index 0000000000000..b2e70c5042dc3
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncDDLProtector.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.doublelive;
+
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.session.pool.SessionPool;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.ByteBuffer;
+import java.util.concurrent.TimeUnit;
+
+public class OperationSyncDDLProtector extends OperationSyncProtector {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(OperationSyncDDLProtector.class);
+
+ private final SessionPool operationSyncSessionPool;
+
+ public OperationSyncDDLProtector(SessionPool operationSyncSessionPool) {
+ super();
+ this.operationSyncSessionPool = operationSyncSessionPool;
+ }
+
+ @Override
+ protected void preCheck() {
+ // do nothing
+ }
+
+ @Override
+ protected void transmitPhysicalPlan(ByteBuffer planBuffer, PhysicalPlan physicalPlan) {
+ while (true) {
+ // transmit E-Plan until it's been received
+ boolean transmitStatus = false;
+
+ try {
+ // try operation sync
+ planBuffer.position(0);
+ transmitStatus = operationSyncSessionPool.operationSyncTransmit(planBuffer);
+ } catch (IoTDBConnectionException connectionException) {
+ // warn IoTDBConnectionException and retry
+ LOGGER.warn("OperationSyncDDLProtector can't transmit, retrying...", connectionException);
+ } catch (Exception e) {
+ // error exception and break
+ LOGGER.error("OperationSyncDDLProtector can't transmit", e);
+ break;
+ }
+
+ if (transmitStatus) {
+ break;
+ } else {
+ try {
+ TimeUnit.SECONDS.sleep(1);
+ } catch (InterruptedException e) {
+ LOGGER.warn("OperationSyncDDLProtector is interrupted", e);
+ }
+ }
+ }
+ }
+
+ public boolean isAtWork() {
+ return isProtectorAtWork;
+ }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncDMLProtector.java b/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncDMLProtector.java
new file mode 100644
index 0000000000000..5668d918f7c5f
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncDMLProtector.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.doublelive;
+
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.tsfile.utils.Pair;
+
+import java.nio.ByteBuffer;
+import java.util.concurrent.TimeUnit;
+
+public class OperationSyncDMLProtector extends OperationSyncProtector {
+
+ private final OperationSyncDDLProtector ddlProtector;
+ private final OperationSyncProducer producer;
+
+ public OperationSyncDMLProtector(
+ OperationSyncDDLProtector ddlProtector, OperationSyncProducer producer) {
+ super();
+ this.ddlProtector = ddlProtector;
+ this.producer = producer;
+ }
+
+ @Override
+ protected void preCheck() {
+ while (ddlProtector.isAtWork()) {
+ try {
+ TimeUnit.SECONDS.sleep(5);
+ } catch (InterruptedException ignore) {
+ // ignore and retry
+ }
+ }
+ }
+
+ @Override
+ protected void transmitPhysicalPlan(ByteBuffer planBuffer, PhysicalPlan physicalPlan) {
+ producer.put(
+ new Pair<>(planBuffer, OperationSyncPlanTypeUtils.getOperationSyncPlanType(physicalPlan)));
+ }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncLogService.java b/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncLogService.java
new file mode 100644
index 0000000000000..a11d3fee286a4
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncLogService.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.doublelive;
+
+import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.engine.fileSystem.SystemFileFactory;
+import org.apache.iotdb.db.writelog.io.LogWriter;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+public class OperationSyncLogService implements Runnable {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(OperationSyncLogService.class);
+
+ private static final String logFileDir =
+ IoTDBDescriptor.getInstance().getConfig().getOperationSyncLogDir();
+ private static final long logFileValidity =
+ IoTDBDescriptor.getInstance().getConfig().getOperationSyncLogValidity() * 1000L;
+ private static final int maxLogFileNum =
+ IoTDBDescriptor.getInstance().getConfig().getOperationSyncLogNum();
+ private static final long maxLogFileSize =
+ IoTDBDescriptor.getInstance().getConfig().getOperationSyncMaxLogSize();
+
+ private static long currentLogFileSize = 0;
+
+ private final OperationSyncProtector protector;
+ private final Lock logWriterLock;
+ private final String logFileName;
+ private int logFileID;
+ private long logFileCreateTime;
+ private File logFile;
+ private LogWriter logWriter;
+
+ public OperationSyncLogService(String logFileName, OperationSyncProtector protector) {
+ this.logFileName = logFileName;
+ this.protector = protector;
+
+ this.logWriterLock = new ReentrantLock();
+ this.logFile = null;
+ this.logWriter = null;
+
+ File logDir = new File(logFileDir);
+ if (!logDir.exists()) {
+ if (!logDir.mkdirs()) {
+ LOGGER.error("Can't make OperationSyncLog file dir: {}", logDir.getAbsolutePath());
+ }
+ }
+ }
+
+ @Override
+ public void run() {
+ // Check if there exists remnant logs
+    List<Integer> logFileIDList = new ArrayList<>();
+ for (int ID = 0; ID < maxLogFileNum; ID++) {
+ File file =
+ SystemFileFactory.INSTANCE.getFile(logFileDir + File.separator + logFileName + ID);
+ if (file.exists()) {
+ logFileIDList.add(ID);
+ }
+ }
+
+ int firstID = 0;
+ if (logFileIDList.size() > 0) {
+ // Re-transmit the remnant logs
+ for (int i = 0; i < logFileIDList.size() - 1; i++) {
+ if (logFileIDList.get(i + 1) - logFileIDList.get(i) > 1) {
+ firstID = i + 1;
+ break;
+ }
+ }
+
+ for (int i = firstID; i < logFileIDList.size(); i++) {
+ protector.registerLogFile(logFileDir + File.separator + logFileName + logFileIDList.get(i));
+ }
+ for (int i = 0; i < firstID; i++) {
+ protector.registerLogFile(logFileDir + File.separator + logFileName + logFileIDList.get(i));
+ }
+
+ int nextID;
+ if (firstID == 0) {
+ nextID = logFileIDList.get(logFileIDList.size() - 1) + 1;
+ } else {
+ nextID = logFileIDList.get(firstID - 1) + 1;
+ }
+ logFileID = nextID % maxLogFileNum;
+ } else {
+ logFileID = 0;
+ }
+
+ while (true) {
+ // Check the validity of logFile
+ logWriterLock.lock();
+ try {
+ if (logWriter != null && System.currentTimeMillis() - logFileCreateTime > logFileValidity) {
+ // Submit logFile when it's expired
+ submitLogFile();
+ }
+ } finally {
+ logWriterLock.unlock();
+ }
+
+ try {
+ // Sleep 10s before next check
+ TimeUnit.SECONDS.sleep(10);
+ } catch (InterruptedException e) {
+ LOGGER.error("OperationSyncLogService been interrupted", e);
+ }
+ }
+ }
+
+ private void submitLogFile() {
+ try {
+ logWriter.force();
+ } catch (IOException e) {
+ LOGGER.error("Can't force logWrite", e);
+ }
+ incLogFileSize(logFile.length());
+
+ for (int retry = 0; retry < 5; retry++) {
+ try {
+ logWriter.close();
+ } catch (IOException e) {
+ LOGGER.warn("Can't close OperationSyncLog: {}, retrying...", logFile.getAbsolutePath());
+ try {
+ // Sleep 1s and retry
+ TimeUnit.SECONDS.sleep(1);
+ } catch (InterruptedException ignored) {
+ // Ignore and retry
+ }
+ continue;
+ }
+
+ LOGGER.info("OperationSyncLog: {} is expired and closed", logFile.getAbsolutePath());
+ break;
+ }
+
+ protector.registerLogFile(
+ logFileDir
+ + File.separator
+ + logFileName
+ + (logFileID - 1 + maxLogFileNum) % maxLogFileNum);
+
+ logWriter = null;
+ logFile = null;
+ }
+
+ private void createLogFile() {
+ logFile =
+ SystemFileFactory.INSTANCE.getFile(logFileDir + File.separator + logFileName + logFileID);
+ while (true) {
+ try {
+ if (logFile.createNewFile()) {
+ logFileCreateTime = System.currentTimeMillis();
+ logWriter = new LogWriter(logFile, false);
+ LOGGER.info("Create OperationSyncLog: {}", logFile.getAbsolutePath());
+ break;
+ }
+ } catch (IOException e) {
+ LOGGER.warn("Can't create OperationSyncLog: {}, retrying...", logFile.getAbsolutePath());
+ try {
+ TimeUnit.SECONDS.sleep(1);
+ } catch (InterruptedException ignored) {
+ // Ignore and retry
+ }
+ }
+ }
+ logFileID = (logFileID + 1) % maxLogFileNum;
+ }
+
+ public static synchronized void incLogFileSize(long size) {
+ currentLogFileSize = currentLogFileSize + size;
+ }
+
+ public void acquireLogWriter() {
+ logWriterLock.lock();
+ }
+
+ public void write(ByteBuffer buffer) throws IOException {
+ if (currentLogFileSize < maxLogFileSize) {
+ if (logWriter == null) {
+        // Create logFile when there is no valid one
+ createLogFile();
+ }
+ logWriter.write(buffer);
+ } else {
+ LOGGER.error("The OperationSyncLog is full, new PhysicalPlans will be discarded.");
+ }
+ }
+
+ public void releaseLogWriter() {
+ logWriterLock.unlock();
+ }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncPlanTypeUtils.java b/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncPlanTypeUtils.java
new file mode 100644
index 0000000000000..a3e21fe5a0ccf
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncPlanTypeUtils.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.doublelive;
+
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.qp.physical.crud.DeletePlan;
+import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
+import org.apache.iotdb.db.qp.physical.sys.AlterTimeSeriesPlan;
+import org.apache.iotdb.db.qp.physical.sys.CreateAlignedTimeSeriesPlan;
+import org.apache.iotdb.db.qp.physical.sys.CreateMultiTimeSeriesPlan;
+import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan;
+import org.apache.iotdb.db.qp.physical.sys.DeleteStorageGroupPlan;
+import org.apache.iotdb.db.qp.physical.sys.DeleteTimeSeriesPlan;
+import org.apache.iotdb.db.qp.physical.sys.SetStorageGroupPlan;
+
+public class OperationSyncPlanTypeUtils {
+
+ public static OperationSyncPlanType getOperationSyncPlanType(PhysicalPlan plan) {
+ if (plan instanceof SetStorageGroupPlan
+ || plan instanceof DeleteStorageGroupPlan
+ || plan instanceof CreateTimeSeriesPlan
+ || plan instanceof CreateMultiTimeSeriesPlan
+ || plan instanceof CreateAlignedTimeSeriesPlan
+ || plan instanceof DeleteTimeSeriesPlan
+ || plan instanceof AlterTimeSeriesPlan) {
+ return OperationSyncPlanType.DDLPlan;
+ } else if (plan instanceof DeletePlan || plan instanceof InsertPlan) {
+ return OperationSyncPlanType.DMLPlan;
+ }
+ return null;
+ }
+
+ public enum OperationSyncPlanType {
+ DDLPlan, // Create, update and delete schema
+ DMLPlan // insert and delete data
+ }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncProducer.java b/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncProducer.java
new file mode 100644
index 0000000000000..2f23b97e05cfa
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncProducer.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.doublelive;
+
+import org.apache.iotdb.tsfile.utils.Pair;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.ByteBuffer;
+import java.util.concurrent.BlockingQueue;
+
+/**
+ * OperationSyncProducer uses a BlockingQueue to cache PhysicalPlans, and persists some
+ * PhysicalPlans when there are too many to transmit
+ */
+public class OperationSyncProducer {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(OperationSyncProducer.class);
+
+  private final BlockingQueue<Pair<ByteBuffer, OperationSyncPlanTypeUtils.OperationSyncPlanType>>
+ operationSyncQueue;
+
+ public OperationSyncProducer(
+      BlockingQueue<Pair<ByteBuffer, OperationSyncPlanTypeUtils.OperationSyncPlanType>>
+ operationSyncQueue) {
+ this.operationSyncQueue = operationSyncQueue;
+ }
+
+  public void put(Pair<ByteBuffer, OperationSyncPlanTypeUtils.OperationSyncPlanType> planPair) {
+ try {
+ planPair.left.position(0);
+ operationSyncQueue.put(planPair);
+ } catch (InterruptedException e) {
+ LOGGER.error("OperationSync cache failed.", e);
+ }
+ }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncProtector.java b/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncProtector.java
new file mode 100644
index 0000000000000..a9ff399c3b99d
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncProtector.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.doublelive;
+
+import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.engine.fileSystem.SystemFileFactory;
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.writelog.io.SingleFileLogReader;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+public abstract class OperationSyncProtector implements Runnable {
+
+ protected static final Logger LOGGER = LoggerFactory.getLogger(OperationSyncProtector.class);
+ protected static final int logFileValidity =
+ IoTDBDescriptor.getInstance().getConfig().getOperationSyncLogValidity();
+
+ // For transmit log files
+ protected final Lock logFileListLock;
+  protected List<String> registeredLogFiles;
+  protected List<String> processingLogFiles;
+
+ // For serialize PhysicalPlan
+ private static final int MAX_PHYSICALPLAN_SIZE = 16 * 1024 * 1024;
+ protected final ByteArrayOutputStream protectorByteStream;
+ protected final DataOutputStream protectorDeserializeStream;
+
+ // Working state
+ protected volatile boolean isProtectorAtWork;
+
+ protected OperationSyncProtector() {
+ logFileListLock = new ReentrantLock();
+ registeredLogFiles = new ArrayList<>();
+
+ protectorByteStream = new ByteArrayOutputStream(MAX_PHYSICALPLAN_SIZE);
+ protectorDeserializeStream = new DataOutputStream(protectorByteStream);
+
+ isProtectorAtWork = false;
+ }
+
+ protected void registerLogFile(String logFile) {
+ logFileListLock.lock();
+ try {
+ registeredLogFiles.add(logFile);
+ } finally {
+ logFileListLock.unlock();
+ }
+ }
+
+ protected void wrapLogFiles() {
+ processingLogFiles = new ArrayList<>(registeredLogFiles);
+ registeredLogFiles = new ArrayList<>();
+ }
+
+ @Override
+ public void run() {
+ while (true) {
+ while (true) {
+ // Wrap and transmit all OperationSyncLogs
+ logFileListLock.lock();
+ try {
+ if (registeredLogFiles.size() > 0) {
+ isProtectorAtWork = true;
+ wrapLogFiles();
+ } else {
+ isProtectorAtWork = false;
+ break;
+ }
+ } finally {
+ logFileListLock.unlock();
+ }
+ if (isProtectorAtWork) {
+ transmitLogFiles();
+ }
+ }
+
+ try {
+ // Sleep a while before next check
+ TimeUnit.SECONDS.sleep(logFileValidity);
+ } catch (InterruptedException e) {
+ LOGGER.warn("OperationSyncProtector been interrupted", e);
+ }
+ }
+ }
+
+ protected void transmitLogFiles() {
+ preCheck();
+ for (String logFileName : processingLogFiles) {
+ File logFile = SystemFileFactory.INSTANCE.getFile(logFileName);
+ SingleFileLogReader logReader;
+ try {
+ logReader = new SingleFileLogReader(logFile);
+ } catch (FileNotFoundException e) {
+ LOGGER.error(
+ "OperationSyncProtector can't open OperationSyncLog: {}, discarded",
+ logFile.getAbsolutePath(),
+ e);
+ continue;
+ }
+
+ while (logReader.hasNext()) {
+ // read and re-serialize the PhysicalPlan
+ PhysicalPlan nextPlan = logReader.next();
+ try {
+ nextPlan.serialize(protectorDeserializeStream);
+ } catch (IOException e) {
+ LOGGER.error("OperationSyncProtector can't serialize PhysicalPlan", e);
+ continue;
+ }
+ ByteBuffer nextBuffer = ByteBuffer.wrap(protectorByteStream.toByteArray());
+ protectorByteStream.reset();
+ transmitPhysicalPlan(nextBuffer, nextPlan);
+ }
+
+ logReader.close();
+ try {
+ // sleep one second then delete OperationSyncLog
+ TimeUnit.SECONDS.sleep(1);
+ } catch (InterruptedException e) {
+ LOGGER.warn("OperationSyncProtector is interrupted", e);
+ }
+
+ OperationSyncLogService.incLogFileSize(-logFile.length());
+
+ boolean deleted = false;
+ for (int retryCnt = 0; retryCnt < 5; retryCnt++) {
+ if (logFile.delete()) {
+ deleted = true;
+ LOGGER.info("OperationSyncLog: {} is deleted.", logFile.getAbsolutePath());
+ break;
+ } else {
+ LOGGER.warn("Delete OperationSyncLog: {} failed. Retrying", logFile.getAbsolutePath());
+ }
+ }
+ if (!deleted) {
+ OperationSyncLogService.incLogFileSize(logFile.length());
+ LOGGER.error("Couldn't delete OperationSyncLog: {}", logFile.getAbsolutePath());
+ }
+ }
+ }
+
+ protected abstract void preCheck();
+
+ protected abstract void transmitPhysicalPlan(ByteBuffer planBuffer, PhysicalPlan physicalPlan);
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncWriteTask.java b/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncWriteTask.java
new file mode 100644
index 0000000000000..e754be61735f7
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/doublelive/OperationSyncWriteTask.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.doublelive;
+
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.session.pool.SessionPool;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/** OperationSyncWriteTask is used for transmitting one E-Plan sent by a client */
+public class OperationSyncWriteTask implements Runnable {
+ private static final Logger LOGGER = LoggerFactory.getLogger(OperationSyncWriteTask.class);
+
+ private final ByteBuffer physicalPlanBuffer;
+ private final SessionPool operationSyncSessionPool;
+ private final OperationSyncDDLProtector ddlProtector;
+ private final OperationSyncLogService ddlLogService;
+
+ public OperationSyncWriteTask(
+ ByteBuffer physicalPlanBuffer,
+ SessionPool operationSyncSessionPool,
+ OperationSyncDDLProtector ddlProtector,
+ OperationSyncLogService ddlLogService) {
+ this.physicalPlanBuffer = physicalPlanBuffer;
+ this.operationSyncSessionPool = operationSyncSessionPool;
+ this.ddlProtector = ddlProtector;
+ this.ddlLogService = ddlLogService;
+ }
+
+ @Override
+ public void run() {
+ if (ddlProtector.isAtWork()) {
+ serializeEPlan();
+ } else {
+ boolean transmitStatus = false;
+ try {
+ physicalPlanBuffer.position(0);
+ transmitStatus = operationSyncSessionPool.operationSyncTransmit(physicalPlanBuffer);
+ } catch (IoTDBConnectionException connectionException) {
+ // warn IoTDBConnectionException and do serialization
+ LOGGER.warn(
+ "OperationSyncWriteTask can't transmit because network failure", connectionException);
+ } catch (Exception e) {
+ // The PhysicalPlan has internal error, reject transmit
+ LOGGER.error("OperationSyncWriteTask can't transmit", e);
+ return;
+ }
+ if (!transmitStatus) {
+ serializeEPlan();
+ }
+ }
+ }
+
+ private void serializeEPlan() {
+ // serialize the E-Plan if necessary
+ try {
+ // must set buffer position to limit() before serialization
+ physicalPlanBuffer.position(physicalPlanBuffer.limit());
+ ddlLogService.acquireLogWriter();
+ ddlLogService.write(physicalPlanBuffer);
+ } catch (IOException e) {
+ LOGGER.error("can't serialize current PhysicalPlan", e);
+ } finally {
+ ddlLogService.releaseLogWriter();
+ }
+ }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
index 4490734fedab8..bff83f352ddd6 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
@@ -22,9 +22,9 @@
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.query.control.FileReaderManager;
-import org.apache.iotdb.db.service.metrics.Metric;
import org.apache.iotdb.db.service.metrics.MetricsService;
-import org.apache.iotdb.db.service.metrics.Tag;
+import org.apache.iotdb.db.service.metrics.enums.Metric;
+import org.apache.iotdb.db.service.metrics.enums.Tag;
import org.apache.iotdb.db.utils.TestOnly;
import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
import org.apache.iotdb.metrics.utils.MetricLevel;
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
index 71c9086600ede..7f217b244f75c 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
@@ -23,9 +23,9 @@
import org.apache.iotdb.db.conf.IoTDBConstant;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.query.control.FileReaderManager;
-import org.apache.iotdb.db.service.metrics.Metric;
import org.apache.iotdb.db.service.metrics.MetricsService;
-import org.apache.iotdb.db.service.metrics.Tag;
+import org.apache.iotdb.db.service.metrics.enums.Metric;
+import org.apache.iotdb.db.service.metrics.enums.Tag;
import org.apache.iotdb.db.utils.TestOnly;
import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
import org.apache.iotdb.metrics.utils.MetricLevel;
@@ -142,14 +142,13 @@ public static TimeSeriesMetadataCache getInstance() {
return TimeSeriesMetadataCache.TimeSeriesMetadataCacheHolder.INSTANCE;
}
- public TimeseriesMetadata get(TimeSeriesMetadataCacheKey key, Set allSensors)
- throws IOException {
- return get(key, allSensors, false);
- }
-
@SuppressWarnings("squid:S1860") // Suppress synchronize warning
public TimeseriesMetadata get(
- TimeSeriesMetadataCacheKey key, Set allSensors, boolean debug) throws IOException {
+ TimeSeriesMetadataCacheKey key,
+      Set<String> allSensors,
+ boolean ignoreNotExists,
+ boolean debug)
+ throws IOException {
if (!CACHE_ENABLE) {
// bloom filter part
TsFileSequenceReader reader = FileReaderManager.getInstance().get(key.filePath, true);
@@ -159,7 +158,7 @@ public TimeseriesMetadata get(
return null;
}
TimeseriesMetadata timeseriesMetadata =
- reader.readTimeseriesMetadata(new Path(key.device, key.measurement), false);
+ reader.readTimeseriesMetadata(new Path(key.device, key.measurement), ignoreNotExists);
return (timeseriesMetadata == null || timeseriesMetadata.getStatistics().getCount() == 0)
? null
: timeseriesMetadata;
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionMetricsManager.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionMetricsManager.java
new file mode 100644
index 0000000000000..fd72cea84769b
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionMetricsManager.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.engine.compaction;
+
+import org.apache.iotdb.db.engine.compaction.constant.CompactionTaskStatus;
+import org.apache.iotdb.db.engine.compaction.constant.CompactionType;
+import org.apache.iotdb.db.engine.compaction.constant.ProcessChunkType;
+import org.apache.iotdb.db.engine.compaction.cross.AbstractCrossSpaceCompactionTask;
+import org.apache.iotdb.db.engine.compaction.inner.AbstractInnerSpaceCompactionTask;
+import org.apache.iotdb.db.engine.compaction.task.AbstractCompactionTask;
+import org.apache.iotdb.db.service.metrics.MetricsService;
+import org.apache.iotdb.db.service.metrics.enums.Metric;
+import org.apache.iotdb.db.service.metrics.enums.Tag;
+import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
+import org.apache.iotdb.metrics.utils.MetricLevel;
+
+import java.util.concurrent.TimeUnit;
+
+public class CompactionMetricsManager {
+
+ public static void recordWriteInfo(
+ CompactionType compactionType,
+ ProcessChunkType processChunkType,
+ boolean aligned,
+ long byteNum) {
+ if (!MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric()) {
+ return;
+ }
+ MetricsService.getInstance()
+ .getMetricManager()
+ .count(
+ byteNum / 1024L,
+ Metric.DATA_WRITTEN.toString(),
+ MetricLevel.IMPORTANT,
+ Tag.NAME.toString(),
+ "compaction",
+ Tag.NAME.toString(),
+ compactionType.toString(),
+ Tag.TYPE.toString(),
+ aligned ? "ALIGNED" : "NOT_ALIGNED",
+ Tag.TYPE.toString(),
+ processChunkType.toString());
+ MetricsService.getInstance()
+ .getMetricManager()
+ .count(
+ byteNum / 1024L,
+ Metric.DATA_WRITTEN.toString(),
+ MetricLevel.IMPORTANT,
+ Tag.NAME.toString(),
+ "compaction",
+ Tag.TYPE.toString(),
+ "total");
+ }
+
+ public static void recordReadInfo(long byteNum) {
+ if (!MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric()) {
+ return;
+ }
+ MetricsService.getInstance()
+ .getMetricManager()
+ .count(
+ byteNum,
+ Metric.DATA_READ.toString(),
+ MetricLevel.IMPORTANT,
+ Tag.NAME.toString(),
+ "compaction");
+ }
+
+ public static void recordTaskInfo(
+ AbstractCompactionTask task, CompactionTaskStatus status, int size) {
+ if (!MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric()) {
+ return;
+ }
+ String taskType = "unknown";
+ boolean isInnerTask = false;
+ if (task instanceof AbstractInnerSpaceCompactionTask) {
+ isInnerTask = true;
+ taskType = "inner";
+ } else if (task instanceof AbstractCrossSpaceCompactionTask) {
+ taskType = "cross";
+ }
+
+ switch (status) {
+ case ADD_TO_QUEUE:
+ case POLL_FROM_QUEUE:
+ MetricsService.getInstance()
+ .getMetricManager()
+ .getOrCreateGauge(
+ Metric.QUEUE.toString(),
+ MetricLevel.IMPORTANT,
+ Tag.NAME.toString(),
+ "compaction_" + taskType,
+ Tag.STATUS.toString(),
+ "waiting")
+ .set(size);
+ break;
+ case READY_TO_EXECUTE:
+ MetricsService.getInstance()
+ .getMetricManager()
+ .getOrCreateGauge(
+ Metric.QUEUE.toString(),
+ MetricLevel.IMPORTANT,
+ Tag.NAME.toString(),
+ "compaction_" + taskType,
+ Tag.STATUS.toString(),
+ "running")
+ .set(size);
+ break;
+ case FINISHED:
+ MetricsService.getInstance()
+ .getMetricManager()
+ .getOrCreateGauge(
+ Metric.QUEUE.toString(),
+ MetricLevel.IMPORTANT,
+ Tag.NAME.toString(),
+ "compaction_" + taskType,
+ Tag.STATUS.toString(),
+ "running")
+ .set(size);
+ MetricsService.getInstance()
+ .getMetricManager()
+ .timer(
+ task.getTimeCost(),
+ TimeUnit.MILLISECONDS,
+ Metric.COST_TASK.toString(),
+ MetricLevel.IMPORTANT,
+ Tag.NAME.toString(),
+ "compaction",
+ Tag.NAME.toString(),
+ isInnerTask ? "inner" : "cross");
+ if (isInnerTask) {
+ MetricsService.getInstance()
+ .getMetricManager()
+ .count(
+ 1,
+ Metric.COMPACTION_TASK_COUNT.toString(),
+ MetricLevel.IMPORTANT,
+ Tag.NAME.toString(),
+ "inner_compaction_count",
+ Tag.TYPE.toString(),
+ ((AbstractInnerSpaceCompactionTask) task).isSequence()
+ ? "sequence"
+ : "unsequence");
+ } else {
+ MetricsService.getInstance()
+ .getMetricManager()
+ .count(
+ 1,
+ Metric.COMPACTION_TASK_COUNT.toString(),
+ MetricLevel.IMPORTANT,
+ Tag.NAME.toString(),
+ "cross_compaction_count");
+ }
+ break;
+ }
+ }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionTaskComparator.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionTaskComparator.java
index cf80d9e555124..f05f5c81053eb 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionTaskComparator.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionTaskComparator.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.engine.compaction.constant.CompactionPriority;
import org.apache.iotdb.db.engine.compaction.cross.AbstractCrossSpaceCompactionTask;
import org.apache.iotdb.db.engine.compaction.inner.AbstractInnerSpaceCompactionTask;
import org.apache.iotdb.db.engine.compaction.task.AbstractCompactionTask;
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionTaskManager.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionTaskManager.java
index b608b14f8795c..280ea16c27352 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionTaskManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionTaskManager.java
@@ -24,19 +24,13 @@
import org.apache.iotdb.db.concurrent.threadpool.WrappedScheduledExecutorService;
import org.apache.iotdb.db.conf.IoTDBConstant;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.compaction.cross.AbstractCrossSpaceCompactionTask;
-import org.apache.iotdb.db.engine.compaction.inner.AbstractInnerSpaceCompactionTask;
+import org.apache.iotdb.db.engine.compaction.constant.CompactionTaskStatus;
import org.apache.iotdb.db.engine.compaction.task.AbstractCompactionTask;
+import org.apache.iotdb.db.engine.compaction.task.CompactionTaskSummary;
import org.apache.iotdb.db.service.IService;
import org.apache.iotdb.db.service.ServiceType;
-import org.apache.iotdb.db.service.metrics.Metric;
-import org.apache.iotdb.db.service.metrics.MetricsService;
-import org.apache.iotdb.db.service.metrics.Tag;
import org.apache.iotdb.db.utils.TestOnly;
import org.apache.iotdb.db.utils.datastructure.FixedPriorityBlockingQueue;
-import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
-import org.apache.iotdb.metrics.type.Gauge;
-import org.apache.iotdb.metrics.utils.MetricLevel;
import com.google.common.util.concurrent.RateLimiter;
import org.slf4j.Logger;
@@ -44,7 +38,6 @@
import java.util.ArrayList;
import java.util.Collections;
-import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -67,13 +60,16 @@ public class CompactionTaskManager implements IService {
// The thread pool that executes the compaction task. The default number of threads for this pool
// is 10.
private WrappedScheduledExecutorService taskExecutionPool;
+
+ // The thread pool that executes the sub compaction task.
+ private ScheduledExecutorService subCompactionTaskExecutionPool;
+
public static volatile AtomicInteger currentTaskNum = new AtomicInteger(0);
private FixedPriorityBlockingQueue candidateCompactionTaskQueue =
new FixedPriorityBlockingQueue<>(1024, new CompactionTaskComparator());
// , it is used to terminate all compaction tasks under the
// logicalStorageGroup
- private Map>> storageGroupTasks = new ConcurrentHashMap<>();
- private Map>>> compactionTaskFutures =
+ private Map>> storageGroupTasks =
new ConcurrentHashMap<>();
private List runningCompactionTaskList = new ArrayList<>();
@@ -100,11 +96,20 @@ public void start() {
IoTDBThreadPoolFactory.newScheduledThreadPool(
IoTDBDescriptor.getInstance().getConfig().getConcurrentCompactionThread(),
ThreadName.COMPACTION_SERVICE.getName());
+ this.subCompactionTaskExecutionPool =
+ IoTDBThreadPoolFactory.newScheduledThreadPool(
+ IoTDBDescriptor.getInstance().getConfig().getConcurrentCompactionThread()
+ * IoTDBDescriptor.getInstance().getConfig().getSubCompactionTaskNum(),
+ ThreadName.COMPACTION_SUB_SERVICE.getName());
currentTaskNum = new AtomicInteger(0);
compactionTaskSubmissionThreadPool =
IoTDBThreadPoolFactory.newScheduledThreadPool(1, ThreadName.COMPACTION_SERVICE.getName());
candidateCompactionTaskQueue.regsitPollLastHook(
AbstractCompactionTask::resetCompactionCandidateStatusForAllSourceFiles);
+ candidateCompactionTaskQueue.regsitPollLastHook(
+ x ->
+ CompactionMetricsManager.recordTaskInfo(
+ x, CompactionTaskStatus.POLL_FROM_QUEUE, candidateCompactionTaskQueue.size()));
// Periodically do the following: fetch the highest priority thread from the
// candidateCompactionTaskQueue, check that all tsfiles in the compaction task are valid, and
@@ -136,7 +141,7 @@ public void waitAndStop(long milliseconds) {
if (taskExecutionPool != null) {
awaitTermination(taskExecutionPool, milliseconds);
awaitTermination(compactionTaskSubmissionThreadPool, milliseconds);
- logger.info("Waiting for task taskExecutionPool to shut down");
+ logger.info("Waiting for task taskExecutionPool to shut down in {} ms", milliseconds);
waitTermination();
storageGroupTasks.clear();
}
@@ -165,6 +170,7 @@ public void waitAllCompactionFinish() {
}
}
storageGroupTasks.clear();
+ candidateCompactionTaskQueue.clear();
logger.info("All compaction task finish");
}
}
@@ -222,9 +228,8 @@ public synchronized boolean addTaskToWaitingQueue(AbstractCompactionTask compact
candidateCompactionTaskQueue.put(compactionTask);
// add metrics
- if (MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric()) {
- addMetrics(compactionTask, true, false);
- }
+ CompactionMetricsManager.recordTaskInfo(
+ compactionTask, CompactionTaskStatus.ADD_TO_QUEUE, candidateCompactionTaskQueue.size());
return true;
}
@@ -243,18 +248,14 @@ public synchronized void submitTaskFromTaskQueue() {
AbstractCompactionTask task = candidateCompactionTaskQueue.take();
// add metrics
- if (MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric()) {
- addMetrics(task, false, false);
- }
+ CompactionMetricsManager.recordTaskInfo(
+ task, CompactionTaskStatus.POLL_FROM_QUEUE, candidateCompactionTaskQueue.size());
if (task != null && task.checkValidAndSetMerging()) {
- submitTask(task.getFullStorageGroupName(), task.getTimePartition(), task);
+ submitTask(task);
runningCompactionTaskList.add(task);
-
- // add metrics
- if (MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric()) {
- addMetrics(task, true, true);
- }
+ CompactionMetricsManager.recordTaskInfo(
+ task, CompactionTaskStatus.READY_TO_EXECUTE, runningCompactionTaskList.size());
}
}
} catch (InterruptedException e) {
@@ -289,59 +290,37 @@ public static void mergeRateLimiterAcquire(RateLimiter limiter, long bytesLength
}
}
- private void addMetrics(AbstractCompactionTask task, boolean isAdd, boolean isRunning) {
- String taskType = "unknown";
- if (task instanceof AbstractInnerSpaceCompactionTask) {
- taskType = "inner";
- } else if (task instanceof AbstractCrossSpaceCompactionTask) {
- taskType = "cross";
- }
- Gauge gauge =
- MetricsService.getInstance()
- .getMetricManager()
- .getOrCreateGauge(
- Metric.QUEUE.toString(),
- MetricLevel.IMPORTANT,
- Tag.NAME.toString(),
- "compaction_" + taskType,
- Tag.STATUS.toString(),
- isRunning ? "running" : "waiting");
- if (isAdd) {
- gauge.incr(1L);
- } else {
- gauge.decr(1L);
- }
- }
-
public synchronized void removeRunningTaskFromList(AbstractCompactionTask task) {
runningCompactionTaskList.remove(task);
// add metrics
- if (MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric()) {
- addMetrics(task, false, true);
- }
+ CompactionMetricsManager.recordTaskInfo(
+ task, CompactionTaskStatus.FINISHED, runningCompactionTaskList.size());
}
/**
* This method will directly submit a task to thread pool if there is available thread.
*
- * @throws RejectedExecutionException
+ * @return the future of the task.
*/
- public synchronized void submitTask(
- String fullStorageGroupName, long timePartition, Callable compactionMergeTask)
- throws RejectedExecutionException {
- if (taskExecutionPool != null && !taskExecutionPool.isTerminated()) {
- Future future = taskExecutionPool.submit(compactionMergeTask);
- compactionTaskFutures
- .computeIfAbsent(fullStorageGroupName, k -> new ConcurrentHashMap<>())
- .computeIfAbsent(timePartition, k -> new HashSet<>())
- .add(future);
- return;
+ public synchronized Future submitTask(
+ Callable compactionMergeTask) throws RejectedExecutionException {
+ if (taskExecutionPool != null && !taskExecutionPool.isShutdown()) {
+ Future future = taskExecutionPool.submit(compactionMergeTask);
+ return future;
}
logger.warn(
"A CompactionTask failed to be submitted to CompactionTaskManager because {}",
taskExecutionPool == null
? "taskExecutionPool is null"
: "taskExecutionPool is terminated");
+ return null;
+ }
+
+ public synchronized Future submitSubTask(Callable subCompactionTask) {
+ if (subCompactionTaskExecutionPool != null && !subCompactionTaskExecutionPool.isShutdown()) {
+ return subCompactionTaskExecutionPool.submit(subCompactionTask);
+ }
+ return null;
}
/**
@@ -349,12 +328,12 @@ public synchronized void submitTask(
* corresponding storage group.
*/
public void abortCompaction(String fullStorageGroupName) {
- Set> subTasks =
+ Set> subTasks =
storageGroupTasks.getOrDefault(fullStorageGroupName, Collections.emptySet());
candidateCompactionTaskQueue.clear();
- Iterator> subIterator = subTasks.iterator();
+ Iterator> subIterator = subTasks.iterator();
while (subIterator.hasNext()) {
- Future next = subIterator.next();
+ Future next = subIterator.next();
if (!next.isDone() && !next.isCancelled()) {
next.cancel(true);
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionUtils.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionUtils.java
index 906ead9c8f13f..c51d2dab985e4 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionUtils.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionUtils.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.db.engine.compaction;
import org.apache.iotdb.db.conf.IoTDBConstant;
+import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.engine.compaction.cross.rewrite.task.SubCompactionTask;
import org.apache.iotdb.db.engine.compaction.inner.utils.MultiTsFileDeviceIterator;
import org.apache.iotdb.db.engine.compaction.writer.AbstractCompactionWriter;
import org.apache.iotdb.db.engine.compaction.writer.CrossSpaceCompactionWriter;
@@ -42,12 +44,14 @@
import org.apache.iotdb.db.utils.QueryUtils;
import org.apache.iotdb.tsfile.common.constant.TsFileConstant;
import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
+import org.apache.iotdb.tsfile.file.metadata.TimeseriesMetadata;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.fileSystem.FSFactoryProducer;
import org.apache.iotdb.tsfile.read.common.BatchData;
import org.apache.iotdb.tsfile.read.reader.IBatchReader;
import org.apache.iotdb.tsfile.utils.Pair;
import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema;
+import org.apache.iotdb.tsfile.write.writer.TsFileIOWriter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -55,22 +59,26 @@
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
import java.util.stream.Collectors;
/**
* This tool can be used to perform inner space or cross space compaction of aligned and non aligned
* timeseries . Currently, we use {@link
* org.apache.iotdb.db.engine.compaction.inner.utils.InnerSpaceCompactionUtils} to speed up if it is
- * an inner space compaction.
+ * a seq inner space compaction.
*/
public class CompactionUtils {
private static final Logger logger =
LoggerFactory.getLogger(IoTDBConstant.COMPACTION_LOGGER_NAME);
+ private static final int subTaskNum =
+ IoTDBDescriptor.getInstance().getConfig().getSubCompactionTaskNum();
public static void compact(
List seqFileResources,
@@ -106,6 +114,7 @@ public static void compact(
}
compactionWriter.endFile();
+ updateDeviceStartTimeAndEndTime(targetFileResources, compactionWriter);
updatePlanIndexes(targetFileResources, seqFileResources, unseqFileResources);
} finally {
QueryResourceManager.getInstance().endQuery(queryId);
@@ -152,9 +161,9 @@ private static void compactAlignedSeries(
if (dataBatchReader.hasNextBatch()) {
// chunkgroup is serialized only when at least one timeseries under this device has data
compactionWriter.startChunkGroup(device, true);
- compactionWriter.startMeasurement(measurementSchemas);
- writeWithReader(compactionWriter, dataBatchReader);
- compactionWriter.endMeasurement();
+ compactionWriter.startMeasurement(measurementSchemas, 0);
+ writeWithReader(compactionWriter, dataBatchReader, 0);
+ compactionWriter.endMeasurement(0);
compactionWriter.endChunkGroup();
}
}
@@ -165,55 +174,58 @@ private static void compactNonAlignedSeries(
AbstractCompactionWriter compactionWriter,
QueryContext queryContext,
QueryDataSource queryDataSource)
- throws MetadataException, IOException {
- boolean hasStartChunkGroup = false;
+ throws IOException, InterruptedException {
MultiTsFileDeviceIterator.MeasurementIterator measurementIterator =
deviceIterator.iterateNotAlignedSeries(device, false);
Set allMeasurements = measurementIterator.getAllMeasurements();
+ int subTaskNums = Math.min(allMeasurements.size(), subTaskNum);
+
+ // assign all measurements to different sub tasks
+ Set[] measurementsForEachSubTask = new HashSet[subTaskNums];
+ int idx = 0;
for (String measurement : allMeasurements) {
- List measurementSchemas = new ArrayList<>();
- try {
- measurementSchemas.add(
- IoTDB.metaManager.getSeriesSchema(new PartialPath(device, measurement)));
- } catch (PathNotExistException e) {
- logger.info("A deleted path is skipped: {}", e.getMessage());
- continue;
+ if (measurementsForEachSubTask[idx % subTaskNums] == null) {
+ measurementsForEachSubTask[idx % subTaskNums] = new HashSet();
}
+ measurementsForEachSubTask[idx++ % subTaskNums].add(measurement);
+ }
- IBatchReader dataBatchReader =
- constructReader(
- device,
- Collections.singletonList(measurement),
- measurementSchemas,
- allMeasurements,
- queryContext,
- queryDataSource,
- false);
+ // construct sub tasks and start compacting measurements in parallel
+ List> futures = new ArrayList<>();
+ compactionWriter.startChunkGroup(device, false);
+ for (int i = 0; i < subTaskNums; i++) {
+ futures.add(
+ CompactionTaskManager.getInstance()
+ .submitSubTask(
+ new SubCompactionTask(
+ device,
+ measurementsForEachSubTask[i],
+ queryContext,
+ queryDataSource,
+ compactionWriter,
+ i)));
+ }
- if (dataBatchReader.hasNextBatch()) {
- if (!hasStartChunkGroup) {
- // chunkgroup is serialized only when at least one timeseries under this device has
- // data
- compactionWriter.startChunkGroup(device, false);
- hasStartChunkGroup = true;
- }
- compactionWriter.startMeasurement(measurementSchemas);
- writeWithReader(compactionWriter, dataBatchReader);
- compactionWriter.endMeasurement();
+ // wait for all sub tasks finish
+ for (int i = 0; i < subTaskNums; i++) {
+ try {
+ futures.get(i).get();
+ } catch (InterruptedException | ExecutionException e) {
+ logger.error("SubCompactionTask meet errors ", e);
+ Thread.interrupted();
+ throw new InterruptedException();
}
}
- if (hasStartChunkGroup) {
- compactionWriter.endChunkGroup();
- }
+ compactionWriter.endChunkGroup();
}
- private static void writeWithReader(AbstractCompactionWriter writer, IBatchReader reader)
- throws IOException {
+ public static void writeWithReader(
+ AbstractCompactionWriter writer, IBatchReader reader, int subTaskId) throws IOException {
while (reader.hasNextBatch()) {
BatchData batchData = reader.nextBatch();
while (batchData.hasCurrent()) {
- writer.write(batchData.currentTime(), batchData.currentValue());
+ writer.write(batchData.currentTime(), batchData.currentValue(), subTaskId);
batchData.next();
}
}
@@ -223,7 +235,7 @@ private static void writeWithReader(AbstractCompactionWriter writer, IBatchReade
* @param measurementIds if device is aligned, then measurementIds contain all measurements. If
* device is not aligned, then measurementIds only contain one measurement.
*/
- private static IBatchReader constructReader(
+ public static IBatchReader constructReader(
String deviceId,
List measurementIds,
List measurementSchemas,
@@ -259,6 +271,29 @@ private static AbstractCompactionWriter getCompactionWriter(
}
}
+ private static void updateDeviceStartTimeAndEndTime(
+ List targetResources, AbstractCompactionWriter compactionWriter) {
+ List targetFileWriters = compactionWriter.getFileIOWriter();
+ for (int i = 0; i < targetFileWriters.size(); i++) {
+ TsFileIOWriter fileIOWriter = targetFileWriters.get(i);
+ TsFileResource fileResource = targetResources.get(i);
+ // The tmp target file may not have any data points written due to the existence of the
+ // mods file, and it will be deleted after compaction. So skip the target file that has been
+ // deleted.
+ if (!fileResource.getTsFile().exists()) {
+ continue;
+ }
+ for (Map.Entry> entry :
+ fileIOWriter.getDeviceTimeseriesMetadataMap().entrySet()) {
+ String device = entry.getKey();
+ for (TimeseriesMetadata timeseriesMetadata : entry.getValue()) {
+ fileResource.updateStartTime(device, timeseriesMetadata.getStatistics().getStartTime());
+ fileResource.updateEndTime(device, timeseriesMetadata.getStatistics().getEndTime());
+ }
+ }
+ }
+ }
+
private static void updatePlanIndexes(
List targetResources,
List seqResources,
@@ -271,7 +306,7 @@ private static void updatePlanIndexes(
// in the new file
for (int i = 0; i < targetResources.size(); i++) {
TsFileResource targetResource = targetResources.get(i);
- // remove the target file been deleted from list
+ // remove the target file that has been deleted from list
if (!targetResource.getTsFile().exists()) {
targetResources.remove(i--);
continue;
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionPriority.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/constant/CompactionPriority.java
similarity index 94%
rename from server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionPriority.java
rename to server/src/main/java/org/apache/iotdb/db/engine/compaction/constant/CompactionPriority.java
index 80b4bc8144136..41ef96c7a53b7 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/CompactionPriority.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/constant/CompactionPriority.java
@@ -17,7 +17,7 @@
* under the License.
*/
-package org.apache.iotdb.db.engine.compaction;
+package org.apache.iotdb.db.engine.compaction.constant;
public enum CompactionPriority {
INNER_CROSS,
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/constant/CompactionTaskStatus.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/constant/CompactionTaskStatus.java
new file mode 100644
index 0000000000000..4b1972a65436a
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/constant/CompactionTaskStatus.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.engine.compaction.constant;
+
+public enum CompactionTaskStatus {
+ ADD_TO_QUEUE,
+ POLL_FROM_QUEUE,
+ READY_TO_EXECUTE,
+ FINISHED
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/constant/CompactionType.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/constant/CompactionType.java
new file mode 100644
index 0000000000000..127039d9cea62
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/constant/CompactionType.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.engine.compaction.constant;
+
+public enum CompactionType {
+ INNER_SEQ_COMPACTION,
+ INNER_UNSEQ_COMPACTION,
+ CROSS_COMPACTION
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/constant/ProcessChunkType.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/constant/ProcessChunkType.java
new file mode 100644
index 0000000000000..f83cd2033f95a
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/constant/ProcessChunkType.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.engine.compaction.constant;
+
+public enum ProcessChunkType {
+ FLUSH_CHUNK,
+ MERGE_CHUNK,
+ DESERIALIZE_CHUNK
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/AbstractCrossSpaceCompactionTask.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/AbstractCrossSpaceCompactionTask.java
index 7fe754c696a45..d8b1bbd6a8290 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/AbstractCrossSpaceCompactionTask.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/AbstractCrossSpaceCompactionTask.java
@@ -20,7 +20,9 @@
package org.apache.iotdb.db.engine.compaction.cross;
import org.apache.iotdb.db.engine.compaction.task.AbstractCompactionTask;
+import org.apache.iotdb.db.engine.storagegroup.TsFileManager;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+import org.apache.iotdb.db.engine.storagegroup.TsFileResourceStatus;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
@@ -34,23 +36,18 @@ public AbstractCrossSpaceCompactionTask(
long timePartition,
AtomicInteger currentTaskNum,
List selectedSequenceFiles,
- List selectedUnsequenceFiles) {
- super(fullStorageGroupName, timePartition, currentTaskNum);
+ List selectedUnsequenceFiles,
+ TsFileManager tsFileManager) {
+ super(fullStorageGroupName, timePartition, tsFileManager, currentTaskNum);
this.selectedSequenceFiles = selectedSequenceFiles;
this.selectedUnsequenceFiles = selectedUnsequenceFiles;
}
- public AbstractCrossSpaceCompactionTask(
- String fullStorageGroupName, long timePartition, AtomicInteger currentTaskNum) {
- super(fullStorageGroupName, timePartition, currentTaskNum);
- this.selectedSequenceFiles = null;
- this.selectedUnsequenceFiles = null;
- }
-
@Override
public void setSourceFilesToCompactionCandidate() {
- this.selectedSequenceFiles.forEach(x -> x.setCompactionCandidate(true));
- this.selectedUnsequenceFiles.forEach(x -> x.setCompactionCandidate(true));
+ this.selectedSequenceFiles.forEach(x -> x.setStatus(TsFileResourceStatus.COMPACTION_CANDIDATE));
+ this.selectedUnsequenceFiles.forEach(
+ x -> x.setStatus(TsFileResourceStatus.COMPACTION_CANDIDATE));
}
public List getSelectedSequenceFiles() {
@@ -76,13 +73,11 @@ public boolean checkValidAndSetMerging() {
}
for (TsFileResource resource : selectedSequenceFiles) {
- resource.setCompacting(true);
- resource.setCompactionCandidate(false);
+ resource.setStatus(TsFileResourceStatus.COMPACTING);
}
for (TsFileResource resource : selectedUnsequenceFiles) {
- resource.setCompacting(true);
- resource.setCompactionCandidate(false);
+ resource.setStatus(TsFileResourceStatus.COMPACTING);
}
return true;
@@ -103,7 +98,7 @@ public String toString() {
@Override
public void resetCompactionCandidateStatusForAllSourceFiles() {
- selectedSequenceFiles.forEach(x -> x.setCompactionCandidate(false));
- selectedUnsequenceFiles.forEach(x -> x.setCompactionCandidate(false));
+ selectedSequenceFiles.forEach(x -> x.setStatus(TsFileResourceStatus.CLOSED));
+ selectedUnsequenceFiles.forEach(x -> x.setStatus(TsFileResourceStatus.CLOSED));
}
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/CrossCompactionStrategy.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/CrossCompactionStrategy.java
index a9579614456ff..05ae8acf5a960 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/CrossCompactionStrategy.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/CrossCompactionStrategy.java
@@ -20,12 +20,10 @@
import org.apache.iotdb.db.engine.compaction.CompactionTaskManager;
import org.apache.iotdb.db.engine.compaction.cross.rewrite.RewriteCrossSpaceCompactionSelector;
-import org.apache.iotdb.db.engine.compaction.cross.rewrite.task.RewriteCrossCompactionRecoverTask;
import org.apache.iotdb.db.engine.compaction.cross.rewrite.task.RewriteCrossSpaceCompactionTask;
import org.apache.iotdb.db.engine.storagegroup.TsFileManager;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
-import java.io.File;
import java.util.List;
public enum CrossCompactionStrategy {
@@ -59,25 +57,6 @@ public AbstractCrossSpaceCompactionTask getCompactionTask(
}
}
- public AbstractCrossSpaceCompactionTask getCompactionRecoverTask(
- String logicalStorageGroupName,
- String virtualStorageGroupName,
- long timePartitionId,
- File logFile,
- TsFileManager tsFileManager) {
- switch (this) {
- case REWRITE_COMPACTION:
- default:
- return new RewriteCrossCompactionRecoverTask(
- logicalStorageGroupName,
- virtualStorageGroupName,
- timePartitionId,
- logFile,
- CompactionTaskManager.currentTaskNum,
- tsFileManager);
- }
- }
-
public AbstractCrossSpaceCompactionSelector getCompactionSelector(
String logicalStorageGroupName,
String virtualGroupId,
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/rewrite/selector/RewriteCompactionFileSelector.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/rewrite/selector/RewriteCompactionFileSelector.java
index 858293f6d6464..57cd06daac386 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/rewrite/selector/RewriteCompactionFileSelector.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/rewrite/selector/RewriteCompactionFileSelector.java
@@ -103,7 +103,7 @@ public RewriteCompactionFileSelector(CrossSpaceCompactionResource resource, long
public List[] select() throws MergeException {
long startTime = System.currentTimeMillis();
try {
- logger.info(
+ logger.debug(
"Selecting merge candidates from {} seqFile, {} unseqFiles",
resource.getSeqFiles().size(),
resource.getUnseqFiles().size());
@@ -115,7 +115,7 @@ public List[] select() throws MergeException {
resource.setUnseqFiles(selectedUnseqFiles);
resource.removeOutdatedSeqReaders();
if (selectedUnseqFiles.isEmpty()) {
- logger.info("No merge candidates are found");
+ logger.debug("No merge candidates are found");
return new List[0];
}
} catch (IOException e) {
@@ -198,9 +198,10 @@ void select(boolean useTightBound) throws IOException {
}
private boolean updateSelectedFiles(long newCost, TsFileResource unseqFile) {
- if (seqSelectedNum + selectedUnseqFiles.size() + 1 + tmpSelectedSeqFiles.size()
- <= maxCrossCompactionFileNum
- && totalCost + newCost < memoryBudget) {
+ if (selectedUnseqFiles.size() == 0
+ || (seqSelectedNum + selectedUnseqFiles.size() + 1 + tmpSelectedSeqFiles.size()
+ <= maxCrossCompactionFileNum
+ && totalCost + newCost < memoryBudget)) {
selectedUnseqFiles.add(unseqFile);
maxSeqFileCost = tempMaxSeqFileCost;
@@ -226,8 +227,8 @@ private boolean updateSelectedFiles(long newCost, TsFileResource unseqFile) {
/**
* To avoid redundant data in seq files, cross space compaction should select all the seq files
* which have overlap with unseq files whether they are compacting or not. Therefore, before
- * adding task into the queue, cross space compaction task should be check whether source seq
- * files are being compacted or not to speed up compaction.
+ * adding task into the queue, cross space compaction task should check whether source seq files
+ * are being compacted or not to speed up compaction.
*/
private boolean checkIsSeqFilesValid() {
for (Integer seqIdx : tmpSelectedSeqFiles) {
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/rewrite/task/RewriteCrossCompactionRecoverTask.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/rewrite/task/RewriteCrossCompactionRecoverTask.java
index 9fd2c7ddbfd18..3c7084b8f81fb 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/rewrite/task/RewriteCrossCompactionRecoverTask.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/rewrite/task/RewriteCrossCompactionRecoverTask.java
@@ -31,6 +31,7 @@
import org.apache.iotdb.db.engine.storagegroup.TsFileManager;
import org.apache.iotdb.db.engine.storagegroup.TsFileNameGenerator;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+import org.apache.iotdb.db.engine.storagegroup.TsFileResourceStatus;
import org.apache.iotdb.db.utils.FileLoaderUtils;
import org.apache.iotdb.tsfile.common.constant.TsFileConstant;
import org.apache.iotdb.tsfile.fileSystem.FSFactoryProducer;
@@ -221,7 +222,9 @@ private boolean handleWithoutAllSourceFilesExist(List sourceFi
for (TsFileIdentifier sourceFileIdentifier : sourceFileIdentifiers) {
File sourceFile = sourceFileIdentifier.getFileFromDataDirs();
if (sourceFile != null) {
- remainSourceTsFileResources.add(new TsFileResource(sourceFile));
+ TsFileResource resource = new TsFileResource(sourceFile);
+ resource.setStatus(TsFileResourceStatus.CLOSED);
+ remainSourceTsFileResources.add(resource);
} else {
// if source file does not exist, its resource file may still exist, so delete it.
File resourceFile =
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/rewrite/task/RewriteCrossSpaceCompactionTask.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/rewrite/task/RewriteCrossSpaceCompactionTask.java
index dffd3292c686a..dc24ed005affe 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/rewrite/task/RewriteCrossSpaceCompactionTask.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/rewrite/task/RewriteCrossSpaceCompactionTask.java
@@ -21,13 +21,14 @@
import org.apache.iotdb.db.conf.IoTDBConstant;
import org.apache.iotdb.db.engine.compaction.CompactionUtils;
import org.apache.iotdb.db.engine.compaction.cross.AbstractCrossSpaceCompactionTask;
-import org.apache.iotdb.db.engine.compaction.cross.CrossSpaceCompactionExceptionHandler;
import org.apache.iotdb.db.engine.compaction.task.AbstractCompactionTask;
+import org.apache.iotdb.db.engine.compaction.task.CompactionExceptionHandler;
import org.apache.iotdb.db.engine.compaction.utils.log.CompactionLogger;
import org.apache.iotdb.db.engine.storagegroup.TsFileManager;
import org.apache.iotdb.db.engine.storagegroup.TsFileNameGenerator;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import org.apache.iotdb.db.engine.storagegroup.TsFileResourceList;
+import org.apache.iotdb.db.engine.storagegroup.TsFileResourceStatus;
import org.apache.iotdb.db.exception.StorageEngineException;
import org.apache.iotdb.db.exception.metadata.MetadataException;
import org.apache.iotdb.db.query.control.FileReaderManager;
@@ -53,7 +54,6 @@ public class RewriteCrossSpaceCompactionTask extends AbstractCrossSpaceCompactio
protected List selectedUnSeqTsFileResourceList;
protected TsFileResourceList seqTsFileResourceList;
protected TsFileResourceList unseqTsFileResourceList;
- protected TsFileManager tsFileManager;
private File logFile;
private List targetTsfileResourceList;
@@ -73,12 +73,12 @@ public RewriteCrossSpaceCompactionTask(
timePartitionId,
currentTaskNum,
selectedSeqTsFileResourceList,
- selectedUnSeqTsFileResourceList);
+ selectedUnSeqTsFileResourceList,
+ tsFileManager);
this.selectedSeqTsFileResourceList = selectedSeqTsFileResourceList;
this.selectedUnSeqTsFileResourceList = selectedUnSeqTsFileResourceList;
this.seqTsFileResourceList = tsFileManager.getSequenceListByTimePartition(timePartition);
this.unseqTsFileResourceList = tsFileManager.getUnsequenceListByTimePartition(timePartition);
- this.tsFileManager = tsFileManager;
}
@Override
@@ -87,15 +87,17 @@ protected void doCompaction() throws Exception {
executeCompaction();
} catch (Throwable throwable) {
// catch throwable instead of exception to handle OOM errors
- logger.error("Meet errors in cross space compaction, {}", throwable.getMessage());
- CrossSpaceCompactionExceptionHandler.handleException(
+ logger.error("Meet errors in cross space compaction", throwable);
+ CompactionExceptionHandler.handleException(
fullStorageGroupName,
logFile,
targetTsfileResourceList,
selectedSeqTsFileResourceList,
selectedUnSeqTsFileResourceList,
tsFileManager,
- timePartition);
+ timePartition,
+ false,
+ true);
throw throwable;
} finally {
releaseAllLock();
@@ -121,6 +123,14 @@ private void executeCompaction()
return;
}
+ long totalSize = 0L;
+ for (TsFileResource resource : selectedSeqTsFileResourceList) {
+ totalSize += resource.getTsFileSize();
+ }
+ for (TsFileResource resource : selectedUnSeqTsFileResourceList) {
+ totalSize += resource.getTsFileSize();
+ }
+
logger.info(
"{} [Compaction] CrossSpaceCompactionTask start. Sequence files : {}, unsequence files : {}",
fullStorageGroupName,
@@ -168,10 +178,12 @@ private void executeCompaction()
if (logFile.exists()) {
FileUtils.delete(logFile);
}
+ double costTime = (System.currentTimeMillis() - startTime) / 1000.0d;
logger.info(
- "{} [Compaction] CrossSpaceCompactionTask Costs {} s",
+ "{} [Compaction] CrossSpaceCompactionTask Costs {} s, compaction speed is {} MB/s",
fullStorageGroupName,
- (System.currentTimeMillis() - startTime) / 1000);
+ costTime,
+ totalSize / 1024.0d / 1024.0d / costTime);
}
}
@@ -186,7 +198,7 @@ private boolean addReadLock(List tsFileResourceList) {
releaseAllLock();
return false;
}
- tsFileResource.setCompacting(true);
+ tsFileResource.setStatus(TsFileResourceStatus.COMPACTING);
}
return true;
}
@@ -207,15 +219,15 @@ private void releaseReadAndLockWrite(List tsFileResourceList) {
}
private void releaseAllLock() {
- selectedSeqTsFileResourceList.forEach(x -> x.setCompactionCandidate(false));
- selectedUnSeqTsFileResourceList.forEach(x -> x.setCompactionCandidate(false));
+ selectedSeqTsFileResourceList.forEach(x -> x.setStatus(TsFileResourceStatus.CLOSED));
+ selectedUnSeqTsFileResourceList.forEach(x -> x.setStatus(TsFileResourceStatus.CLOSED));
for (TsFileResource tsFileResource : holdReadLockList) {
tsFileResource.readUnlock();
- tsFileResource.setCompacting(false);
+ tsFileResource.setStatus(TsFileResourceStatus.CLOSED);
}
for (TsFileResource tsFileResource : holdWriteLockList) {
tsFileResource.writeUnlock();
- tsFileResource.setCompacting(false);
+ tsFileResource.setStatus(TsFileResourceStatus.CLOSED);
}
holdReadLockList.clear();
holdWriteLockList.clear();
@@ -224,7 +236,7 @@ private void releaseAllLock() {
private void deleteOldFiles(List tsFileResourceList) throws IOException {
for (TsFileResource tsFileResource : tsFileResourceList) {
FileReaderManager.getInstance().closeFileAndRemoveReader(tsFileResource.getTsFilePath());
- tsFileResource.setDeleted(true);
+ tsFileResource.setStatus(TsFileResourceStatus.DELETED);
tsFileResource.remove();
logger.info(
"[CrossSpaceCompaction] Delete TsFile :{}.",
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/rewrite/task/SubCompactionTask.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/rewrite/task/SubCompactionTask.java
new file mode 100644
index 0000000000000..08f63c6e8b018
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/rewrite/task/SubCompactionTask.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.engine.compaction.cross.rewrite.task;
+
+import org.apache.iotdb.db.conf.IoTDBConstant;
+import org.apache.iotdb.db.engine.compaction.CompactionUtils;
+import org.apache.iotdb.db.engine.compaction.writer.AbstractCompactionWriter;
+import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
+import org.apache.iotdb.db.exception.metadata.PathNotExistException;
+import org.apache.iotdb.db.metadata.path.PartialPath;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.service.IoTDB;
+import org.apache.iotdb.tsfile.read.reader.IBatchReader;
+import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
+
+/**
+ * This class is used to implement reading the measurements and writing to the target files in
+ * parallel in the compaction. Currently, it only works for nonAligned data in cross space
+ * compaction and unseq inner space compaction.
+ */
+public class SubCompactionTask implements Callable {
+ private static final Logger logger =
+ LoggerFactory.getLogger(IoTDBConstant.COMPACTION_LOGGER_NAME);
+ private final String device;
+ private final Set measurementList;
+ private final QueryContext queryContext;
+ private final QueryDataSource queryDataSource;
+ private final AbstractCompactionWriter compactionWriter;
+ private final int taskId;
+
+ public SubCompactionTask(
+ String device,
+ Set measurementList,
+ QueryContext queryContext,
+ QueryDataSource queryDataSource,
+ AbstractCompactionWriter compactionWriter,
+ int taskId) {
+ this.device = device;
+ this.measurementList = measurementList;
+ this.queryContext = queryContext;
+ this.queryDataSource = queryDataSource;
+ this.compactionWriter = compactionWriter;
+ this.taskId = taskId;
+ }
+
+ @Override
+ public Void call() throws Exception {
+ for (String measurement : measurementList) {
+ List measurementSchemas = new ArrayList<>();
+ try {
+ measurementSchemas.add(
+ IoTDB.metaManager.getSeriesSchema(new PartialPath(device, measurement)));
+ } catch (PathNotExistException e) {
+ logger.info("A deleted path is skipped: {}", e.getMessage());
+ continue;
+ }
+
+ IBatchReader dataBatchReader =
+ CompactionUtils.constructReader(
+ device,
+ Collections.singletonList(measurement),
+ measurementSchemas,
+ measurementList,
+ queryContext,
+ queryDataSource,
+ false);
+
+ if (dataBatchReader.hasNextBatch()) {
+ compactionWriter.startMeasurement(measurementSchemas, taskId);
+ CompactionUtils.writeWithReader(compactionWriter, dataBatchReader, taskId);
+ compactionWriter.endMeasurement(taskId);
+ }
+ }
+ return null;
+ }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/AbstractInnerSpaceCompactionTask.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/AbstractInnerSpaceCompactionTask.java
index ca0095231fc57..c511af81cfc15 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/AbstractInnerSpaceCompactionTask.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/AbstractInnerSpaceCompactionTask.java
@@ -21,8 +21,10 @@
import org.apache.iotdb.db.conf.IoTDBConstant;
import org.apache.iotdb.db.engine.compaction.task.AbstractCompactionTask;
+import org.apache.iotdb.db.engine.storagegroup.TsFileManager;
import org.apache.iotdb.db.engine.storagegroup.TsFileNameGenerator;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+import org.apache.iotdb.db.engine.storagegroup.TsFileResourceStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -47,8 +49,9 @@ public AbstractInnerSpaceCompactionTask(
long timePartition,
AtomicInteger currentTaskNum,
boolean sequence,
- List selectedTsFileResourceList) {
- super(storageGroupName, timePartition, currentTaskNum);
+ List selectedTsFileResourceList,
+ TsFileManager tsFileManager) {
+ super(storageGroupName, timePartition, tsFileManager, currentTaskNum);
this.selectedTsFileResourceList = selectedTsFileResourceList;
this.sequence = sequence;
collectSelectedFilesInfo();
@@ -56,7 +59,14 @@ public AbstractInnerSpaceCompactionTask(
@Override
public void setSourceFilesToCompactionCandidate() {
- this.selectedTsFileResourceList.forEach(x -> x.setCompactionCandidate(true));
+ this.selectedTsFileResourceList.forEach(
+ tsFileResource -> {
+ try {
+ tsFileResource.setStatus(TsFileResourceStatus.COMPACTION_CANDIDATE);
+ } catch (Exception e) {
+ LOGGER.error("Exception occurs when setting compaction candidate", e);
+ }
+ });
}
private void collectSelectedFilesInfo() {
@@ -114,8 +124,7 @@ public boolean checkValidAndSetMerging() {
}
for (TsFileResource resource : selectedTsFileResourceList) {
- resource.setCompacting(true);
- resource.setCompactionCandidate(false);
+ resource.setStatus(TsFileResourceStatus.COMPACTING);
}
return true;
}
@@ -137,6 +146,6 @@ public String toString() {
@Override
public void resetCompactionCandidateStatusForAllSourceFiles() {
- selectedTsFileResourceList.forEach(x -> x.setCompactionCandidate(false));
+ selectedTsFileResourceList.forEach(x -> x.setStatus(TsFileResourceStatus.CLOSED));
}
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/InnerCompactionStrategy.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/InnerCompactionStrategy.java
index 89e3cf0ca4906..35d8c7426cb11 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/InnerCompactionStrategy.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/InnerCompactionStrategy.java
@@ -20,13 +20,11 @@
package org.apache.iotdb.db.engine.compaction.inner;
import org.apache.iotdb.db.engine.compaction.CompactionTaskManager;
-import org.apache.iotdb.db.engine.compaction.inner.sizetiered.SizeTieredCompactionRecoverTask;
import org.apache.iotdb.db.engine.compaction.inner.sizetiered.SizeTieredCompactionSelector;
import org.apache.iotdb.db.engine.compaction.inner.sizetiered.SizeTieredCompactionTask;
import org.apache.iotdb.db.engine.storagegroup.TsFileManager;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
-import java.io.File;
import java.util.List;
public enum InnerCompactionStrategy {
@@ -60,29 +58,6 @@ public AbstractInnerSpaceCompactionTask getCompactionTask(
}
}
- public AbstractInnerSpaceCompactionTask getCompactionRecoverTask(
- String logicalStorageGroupName,
- String virtualStorageGroup,
- long timePartition,
- File compactionLogFile,
- String dataDir,
- boolean sequence,
- TsFileManager tsFileManager) {
- switch (this) {
- case SIZE_TIERED_COMPACTION:
- default:
- return new SizeTieredCompactionRecoverTask(
- logicalStorageGroupName,
- virtualStorageGroup,
- timePartition,
- compactionLogFile,
- dataDir,
- sequence,
- CompactionTaskManager.currentTaskNum,
- tsFileManager);
- }
- }
-
public AbstractInnerSpaceCompactionSelector getCompactionSelector(
String logicalStorageGroupName,
String virtualStorageGroupName,
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/sizetiered/SizeTieredCompactionRecoverTask.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/sizetiered/SizeTieredCompactionRecoverTask.java
index af90f8635767f..23f1824fcbea6 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/sizetiered/SizeTieredCompactionRecoverTask.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/sizetiered/SizeTieredCompactionRecoverTask.java
@@ -29,6 +29,7 @@
import org.apache.iotdb.db.engine.modification.ModificationFile;
import org.apache.iotdb.db.engine.storagegroup.TsFileManager;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+import org.apache.iotdb.db.engine.storagegroup.TsFileResourceStatus;
import org.apache.iotdb.tsfile.common.constant.TsFileConstant;
import org.apache.commons.io.FileUtils;
@@ -228,7 +229,9 @@ private boolean handleWithoutAllSourceFilesExist(List sourceFi
for (TsFileIdentifier sourceFileIdentifier : sourceFileIdentifiers) {
File sourceFile = sourceFileIdentifier.getFileFromDataDirs();
if (sourceFile != null) {
- remainSourceTsFileResources.add(new TsFileResource(sourceFile));
+ TsFileResource resource = new TsFileResource(sourceFile);
+ resource.setStatus(TsFileResourceStatus.CLOSED);
+ remainSourceTsFileResources.add(resource);
} else {
// if source file does not exist, its resource file may still exist, so delete it.
File resourceFile =
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/sizetiered/SizeTieredCompactionSelector.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/sizetiered/SizeTieredCompactionSelector.java
index bed2abbc866c8..b8d9a4c746570 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/sizetiered/SizeTieredCompactionSelector.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/sizetiered/SizeTieredCompactionSelector.java
@@ -123,7 +123,10 @@ private boolean selectLevelTask(
for (TsFileResource currentFile : tsFileResources) {
TsFileNameGenerator.TsFileName currentName =
TsFileNameGenerator.getTsFileName(currentFile.getTsFile().getName());
- if (currentName.getInnerCompactionCnt() != level || currentFile.isCompactionCandidate()) {
+ if (currentName.getInnerCompactionCnt() != level
+ || currentFile.isCompactionCandidate()
+ || currentFile.isCompacting()
+ || !currentFile.isClosed()) {
selectedFileList.clear();
selectedFileSize = 0L;
continue;
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/sizetiered/SizeTieredCompactionTask.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/sizetiered/SizeTieredCompactionTask.java
index a77e625c4c524..d5a5f675e927c 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/sizetiered/SizeTieredCompactionTask.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/sizetiered/SizeTieredCompactionTask.java
@@ -21,21 +21,24 @@
import org.apache.iotdb.db.conf.IoTDBConstant;
import org.apache.iotdb.db.engine.compaction.CompactionUtils;
import org.apache.iotdb.db.engine.compaction.inner.AbstractInnerSpaceCompactionTask;
-import org.apache.iotdb.db.engine.compaction.inner.InnerSpaceCompactionExceptionHandler;
import org.apache.iotdb.db.engine.compaction.inner.utils.InnerSpaceCompactionUtils;
import org.apache.iotdb.db.engine.compaction.task.AbstractCompactionTask;
+import org.apache.iotdb.db.engine.compaction.task.CompactionExceptionHandler;
import org.apache.iotdb.db.engine.compaction.utils.log.CompactionLogger;
import org.apache.iotdb.db.engine.storagegroup.TsFileManager;
import org.apache.iotdb.db.engine.storagegroup.TsFileNameGenerator;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import org.apache.iotdb.db.engine.storagegroup.TsFileResourceList;
+import org.apache.iotdb.db.engine.storagegroup.TsFileResourceStatus;
import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
+import org.apache.iotdb.tsfile.exception.write.TsFileNotCompleteException;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
+import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
@@ -48,7 +51,6 @@ public class SizeTieredCompactionTask extends AbstractInnerSpaceCompactionTask {
private static final Logger LOGGER =
LoggerFactory.getLogger(IoTDBConstant.COMPACTION_LOGGER_NAME);
protected TsFileResourceList tsFileResourceList;
- protected TsFileManager tsFileManager;
protected boolean[] isHoldingReadLock;
protected boolean[] isHoldingWriteLock;
@@ -65,8 +67,8 @@ public SizeTieredCompactionTask(
timePartition,
currentTaskNum,
sequence,
- selectedTsFileResourceList);
- this.tsFileManager = tsFileManager;
+ selectedTsFileResourceList,
+ tsFileManager);
isHoldingReadLock = new boolean[selectedTsFileResourceList.size()];
isHoldingWriteLock = new boolean[selectedTsFileResourceList.size()];
for (int i = 0; i < selectedTsFileResourceList.size(); ++i) {
@@ -92,6 +94,8 @@ protected void doCompaction() throws Exception {
TsFileResource targetTsFileResource =
TsFileNameGenerator.getInnerCompactionTargetFileResource(
selectedTsFileResourceList, sequence);
+ List targetTsFileList =
+ new ArrayList<>(Collections.singletonList(targetTsFileResource));
LOGGER.info(
"{} [Compaction] starting compaction task with {} files",
fullStorageGroupName,
@@ -107,8 +111,7 @@ protected void doCompaction() throws Exception {
sizeTieredCompactionLogger = new CompactionLogger(logFile);
sizeTieredCompactionLogger.logFiles(
selectedTsFileResourceList, CompactionLogger.STR_SOURCE_FILES);
- sizeTieredCompactionLogger.logFiles(
- Collections.singletonList(targetTsFileResource), CompactionLogger.STR_TARGET_FILES);
+ sizeTieredCompactionLogger.logFiles(targetTsFileList, CompactionLogger.STR_TARGET_FILES);
LOGGER.info("{} [SizeTiredCompactionTask] Close the logger", fullStorageGroupName);
sizeTieredCompactionLogger.close();
LOGGER.info(
@@ -119,9 +122,7 @@ protected void doCompaction() throws Exception {
InnerSpaceCompactionUtils.compact(targetTsFileResource, selectedTsFileResourceList);
} else {
CompactionUtils.compact(
- Collections.emptyList(),
- selectedTsFileResourceList,
- Collections.singletonList(targetTsFileResource));
+ Collections.emptyList(), selectedTsFileResourceList, targetTsFileList);
}
InnerSpaceCompactionUtils.moveTargetFile(targetTsFileResource, fullStorageGroupName);
@@ -140,14 +141,14 @@ protected void doCompaction() throws Exception {
tsFileManager.replace(
selectedTsFileResourceList,
Collections.emptyList(),
- Collections.singletonList(targetTsFileResource),
+ targetTsFileList,
timePartition,
true);
} else {
tsFileManager.replace(
Collections.emptyList(),
selectedTsFileResourceList,
- Collections.singletonList(targetTsFileResource),
+ targetTsFileList,
timePartition,
false);
}
@@ -164,10 +165,11 @@ protected void doCompaction() throws Exception {
isHoldingWriteLock[i] = true;
}
- if (targetTsFileResource.getTsFile().length()
- < TSFileConfig.MAGIC_STRING.getBytes().length * 2L + Byte.BYTES) {
+ if (targetTsFileResource.getTsFile().exists()
+ && targetTsFileResource.getTsFile().length()
+ < TSFileConfig.MAGIC_STRING.getBytes().length * 2L + Byte.BYTES) {
// the file size is smaller than magic string and version number
- throw new RuntimeException(
+ throw new TsFileNotCompleteException(
String.format(
"target file %s is smaller than magic string and version number size",
targetTsFileResource));
@@ -181,35 +183,49 @@ protected void doCompaction() throws Exception {
InnerSpaceCompactionUtils.deleteModificationForSourceFile(
selectedTsFileResourceList, fullStorageGroupName);
- long costTime = System.currentTimeMillis() - startTime;
+ double costTime = (System.currentTimeMillis() - startTime) / 1000.0d;
LOGGER.info(
"{} [SizeTiredCompactionTask] all compaction task finish, target file is {},"
- + "time cost is {} s",
+ + "time cost is {} s, compaction speed is {} MB/s",
fullStorageGroupName,
targetTsFileResource.getTsFile().getName(),
- costTime / 1000);
+ costTime,
+ ((double) selectedFileSize) / 1024.0d / 1024.0d / costTime);
if (logFile.exists()) {
FileUtils.delete(logFile);
}
} catch (Throwable throwable) {
- LOGGER.error(
- "{} [Compaction] Throwable is caught during execution of SizeTieredCompaction, {}",
- fullStorageGroupName,
- throwable);
LOGGER.warn("{} [Compaction] Start to handle exception", fullStorageGroupName);
if (sizeTieredCompactionLogger != null) {
sizeTieredCompactionLogger.close();
}
- InnerSpaceCompactionExceptionHandler.handleException(
- fullStorageGroupName,
- logFile,
- targetTsFileResource,
- selectedTsFileResourceList,
- tsFileManager,
- tsFileResourceList);
+ if (isSequence()) {
+ CompactionExceptionHandler.handleException(
+ fullStorageGroupName,
+ logFile,
+ targetTsFileList,
+ selectedTsFileResourceList,
+ Collections.emptyList(),
+ tsFileManager,
+ timePartition,
+ true,
+ isSequence());
+ } else {
+ CompactionExceptionHandler.handleException(
+ fullStorageGroupName,
+ logFile,
+ targetTsFileList,
+ Collections.emptyList(),
+ selectedTsFileResourceList,
+ tsFileManager,
+ timePartition,
+ true,
+ isSequence());
+ }
+ throw throwable;
} finally {
- releaseFileLocksAndResetMergingStatus(true);
+ releaseFileLocksAndResetMergingStatus();
}
}
@@ -227,7 +243,6 @@ public boolean equalsOtherTask(AbstractCompactionTask other) {
@Override
public boolean checkValidAndSetMerging() {
- selectedTsFileResourceList.forEach(x -> x.setCompactionCandidate(false));
for (int i = 0; i < selectedTsFileResourceList.size(); ++i) {
TsFileResource resource = selectedTsFileResourceList.get(i);
resource.readLock();
@@ -238,13 +253,13 @@ public boolean checkValidAndSetMerging() {
|| resource.isDeleted()) {
// this source file cannot be compacted
// release the lock of locked files, and return
- releaseFileLocksAndResetMergingStatus(false);
+ releaseFileLocksAndResetMergingStatus();
return false;
}
}
for (TsFileResource resource : selectedTsFileResourceList) {
- resource.setCompacting(true);
+ resource.setStatus(TsFileResourceStatus.COMPACTING);
}
return true;
}
@@ -253,7 +268,7 @@ public boolean checkValidAndSetMerging() {
* release the read lock and write lock of files if it is held, and set the merging status of
* selected files to false
*/
- private void releaseFileLocksAndResetMergingStatus(boolean resetCompactingStatus) {
+ private void releaseFileLocksAndResetMergingStatus() {
for (int i = 0; i < selectedTsFileResourceList.size(); ++i) {
if (isHoldingReadLock[i]) {
selectedTsFileResourceList.get(i).readUnlock();
@@ -261,9 +276,7 @@ private void releaseFileLocksAndResetMergingStatus(boolean resetCompactingStatus
if (isHoldingWriteLock[i]) {
selectedTsFileResourceList.get(i).writeUnlock();
}
- if (resetCompactingStatus) {
- selectedTsFileResourceList.get(i).setCompacting(false);
- }
+ selectedTsFileResourceList.get(i).setStatus(TsFileResourceStatus.CLOSED);
}
}
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/utils/AlignedSeriesCompactionExecutor.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/utils/AlignedSeriesCompactionExecutor.java
index a4f4ef9912964..af5353153f315 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/utils/AlignedSeriesCompactionExecutor.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/utils/AlignedSeriesCompactionExecutor.java
@@ -19,6 +19,10 @@
package org.apache.iotdb.db.engine.compaction.inner.utils;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.engine.compaction.CompactionMetricsManager;
+import org.apache.iotdb.db.engine.compaction.CompactionTaskManager;
+import org.apache.iotdb.db.engine.compaction.constant.CompactionType;
+import org.apache.iotdb.db.engine.compaction.constant.ProcessChunkType;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import org.apache.iotdb.tsfile.file.metadata.AlignedChunkMetadata;
import org.apache.iotdb.tsfile.file.metadata.IChunkMetadata;
@@ -33,6 +37,8 @@
import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
import org.apache.iotdb.tsfile.write.writer.TsFileIOWriter;
+import com.google.common.util.concurrent.RateLimiter;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
@@ -51,6 +57,8 @@ public class AlignedSeriesCompactionExecutor {
private final AlignedChunkWriterImpl chunkWriter;
private final List schemaList;
private long remainingPointInChunkWriter = 0L;
+ private final RateLimiter rateLimiter =
+ CompactionTaskManager.getInstance().getMergeWriteRateLimiter();
private final long chunkSizeThreshold =
IoTDBDescriptor.getInstance().getConfig().getTargetChunkSize();
@@ -111,12 +119,20 @@ public void execute() throws IOException {
TsFileAlignedSeriesReaderIterator readerIterator =
new TsFileAlignedSeriesReaderIterator(reader, alignedChunkMetadataList, schemaList);
while (readerIterator.hasNext()) {
- AlignedChunkReader chunkReader = readerIterator.nextReader();
- compactOneAlignedChunk(chunkReader);
+ Pair chunkReaderAndChunkSize = readerIterator.nextReader();
+ CompactionMetricsManager.recordReadInfo(chunkReaderAndChunkSize.right);
+ compactOneAlignedChunk(chunkReaderAndChunkSize.left);
}
}
if (remainingPointInChunkWriter != 0L) {
+ CompactionTaskManager.mergeRateLimiterAcquire(
+ rateLimiter, chunkWriter.estimateMaxSeriesMemSize());
+ CompactionMetricsManager.recordWriteInfo(
+ CompactionType.INNER_SEQ_COMPACTION,
+ ProcessChunkType.DESERIALIZE_CHUNK,
+ true,
+ chunkWriter.estimateMaxSeriesMemSize());
chunkWriter.writeToFileWriter(writer);
}
}
@@ -148,6 +164,13 @@ private void compactOneAlignedChunk(AlignedChunkReader chunkReader) throws IOExc
private void flushChunkWriterIfLargeEnough() throws IOException {
if (remainingPointInChunkWriter >= chunkPointNumThreshold
|| chunkWriter.estimateMaxSeriesMemSize() >= chunkSizeThreshold * schemaList.size()) {
+ CompactionTaskManager.mergeRateLimiterAcquire(
+ rateLimiter, chunkWriter.estimateMaxSeriesMemSize());
+ CompactionMetricsManager.recordWriteInfo(
+ CompactionType.INNER_SEQ_COMPACTION,
+ ProcessChunkType.DESERIALIZE_CHUNK,
+ true,
+ chunkWriter.estimateMaxSeriesMemSize());
chunkWriter.writeToFileWriter(writer);
remainingPointInChunkWriter = 0L;
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/utils/InnerSpaceCompactionUtils.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/utils/InnerSpaceCompactionUtils.java
index 91f5a9b5428bb..cf402099c79ad 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/utils/InnerSpaceCompactionUtils.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/utils/InnerSpaceCompactionUtils.java
@@ -25,11 +25,11 @@
import org.apache.iotdb.db.engine.compaction.cross.rewrite.manage.CrossSpaceCompactionResource;
import org.apache.iotdb.db.engine.compaction.cross.rewrite.selector.ICrossSpaceMergeFileSelector;
import org.apache.iotdb.db.engine.compaction.cross.rewrite.selector.RewriteCompactionFileSelector;
-import org.apache.iotdb.db.engine.compaction.utils.log.CompactionLogger;
import org.apache.iotdb.db.engine.modification.Modification;
import org.apache.iotdb.db.engine.modification.ModificationFile;
import org.apache.iotdb.db.engine.storagegroup.TsFileManager;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+import org.apache.iotdb.db.engine.storagegroup.TsFileResourceStatus;
import org.apache.iotdb.db.exception.metadata.MetadataException;
import org.apache.iotdb.db.exception.metadata.PathNotExistException;
import org.apache.iotdb.db.metadata.path.PartialPath;
@@ -235,7 +235,7 @@ public static void combineModsInCompaction(
public static boolean deleteTsFile(TsFileResource seqFile) {
try {
FileReaderManager.getInstance().closeFileAndRemoveReader(seqFile.getTsFilePath());
- seqFile.setDeleted(true);
+ seqFile.setStatus(TsFileResourceStatus.DELETED);
seqFile.delete();
} catch (IOException e) {
logger.error(e.getMessage(), e);
@@ -256,16 +256,6 @@ public static ICrossSpaceMergeFileSelector getCrossSpaceFileSelector(
}
}
- public static File[] findInnerSpaceCompactionLogs(String directory) {
- File timePartitionDir = new File(directory);
- if (timePartitionDir.exists()) {
- return timePartitionDir.listFiles(
- (dir, name) -> name.endsWith(CompactionLogger.INNER_COMPACTION_LOG_NAME_SUFFIX));
- } else {
- return new File[0];
- }
- }
-
public static class TsFileNameComparator implements Comparator {
@Override
@@ -280,6 +270,13 @@ public int compare(TsFileSequenceReader o1, TsFileSequenceReader o2) {
*/
public static void moveTargetFile(TsFileResource targetResource, String fullStorageGroupName)
throws IOException {
+ if (!targetResource.getTsFile().exists()) {
+ logger.info(
+ "{} [Compaction] Tmp target tsfile {} may be deleted after compaction.",
+ fullStorageGroupName,
+ targetResource.getTsFilePath());
+ return;
+ }
if (!targetResource.getTsFilePath().endsWith(IoTDBConstant.INNER_COMPACTION_TMP_FILE_SUFFIX)) {
logger.warn(
"{} [Compaction] Tmp target tsfile {} should be end with {}",
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/utils/MultiTsFileDeviceIterator.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/utils/MultiTsFileDeviceIterator.java
index cb76dfdcdc607..69e9ab0e10377 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/utils/MultiTsFileDeviceIterator.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/utils/MultiTsFileDeviceIterator.java
@@ -94,7 +94,10 @@ public MultiTsFileDeviceIterator(
public boolean hasNextDevice() {
boolean hasNext = false;
for (TsFileDeviceIterator iterator : deviceIteratorMap.values()) {
- hasNext = hasNext || iterator.hasNext() || !iterator.current().equals(currentDevice);
+ hasNext =
+ hasNext
+ || iterator.hasNext()
+ || (iterator.current() != null && !iterator.current().equals(currentDevice));
}
return hasNext;
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/utils/SingleSeriesCompactionExecutor.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/utils/SingleSeriesCompactionExecutor.java
index 068ee2cc842df..cf9a04f41d606 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/utils/SingleSeriesCompactionExecutor.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/inner/utils/SingleSeriesCompactionExecutor.java
@@ -19,9 +19,13 @@
package org.apache.iotdb.db.engine.compaction.inner.utils;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.engine.compaction.CompactionMetricsManager;
import org.apache.iotdb.db.engine.compaction.CompactionTaskManager;
+import org.apache.iotdb.db.engine.compaction.constant.CompactionType;
+import org.apache.iotdb.db.engine.compaction.constant.ProcessChunkType;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import org.apache.iotdb.db.metadata.path.PartialPath;
+import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
import org.apache.iotdb.tsfile.read.TimeValuePair;
import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
@@ -66,6 +70,8 @@ public class SingleSeriesCompactionExecutor {
IoTDBDescriptor.getInstance().getConfig().getChunkSizeLowerBoundInCompaction();
private final long chunkPointNumLowerBound =
IoTDBDescriptor.getInstance().getConfig().getChunkPointNumLowerBoundInCompaction();
+ private final boolean enableMetrics =
+ MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric();
public SingleSeriesCompactionExecutor(
PartialPath series,
@@ -95,6 +101,8 @@ public void execute() throws IOException {
List<ChunkMetadata> chunkMetadataList = readerListPair.right;
for (ChunkMetadata chunkMetadata : chunkMetadataList) {
Chunk currentChunk = reader.readMemChunk(chunkMetadata);
+ CompactionMetricsManager.recordReadInfo(
+ currentChunk.getHeader().getSerializedSize() + currentChunk.getHeader().getDataSize());
// if this chunk is modified, deserialize it into points
if (chunkMetadata.getDeleteIntervalList() != null) {
@@ -117,7 +125,7 @@ public void execute() throws IOException {
// after all the chunk of this sensor is read, flush the remaining data
if (cachedChunk != null) {
- flushChunkToFileWriter(cachedChunk, cachedChunkMetadata);
+ flushChunkToFileWriter(cachedChunk, cachedChunkMetadata, true);
cachedChunk = null;
cachedChunkMetadata = null;
} else if (pointCountInChunkWriter != 0L) {
@@ -154,7 +162,7 @@ private void processLargeChunk(Chunk chunk, ChunkMetadata chunkMetadata) throws
} else {
// there is no points remaining in ChunkWriter and no cached chunk
// flush it to file directly
- flushChunkToFileWriter(chunk, chunkMetadata);
+ flushChunkToFileWriter(chunk, chunkMetadata, false);
}
}
@@ -209,7 +217,14 @@ private void writeChunkIntoChunkWriter(Chunk chunk) throws IOException {
}
private void writeCachedChunkIntoChunkWriter() throws IOException {
- cachedChunk.getData().flip();
+ if (cachedChunk.getData().position() != 0) {
+ // If the position of cache chunk data buffer is 0,
+ // it means that the cache chunk is the first chunk cached,
+ // and it hasn't merged with any chunk yet.
+ // If we flip it, both the position and limit in the buffer will be 0,
+ // which leads to the loss of data.
+ cachedChunk.getData().flip();
+ }
writeChunkIntoChunkWriter(cachedChunk);
cachedChunk = null;
cachedChunkMetadata = null;
@@ -249,7 +264,8 @@ private void writeTimeAndValueToChunkWriter(TimeValuePair timeValuePair) {
}
}
- private void flushChunkToFileWriter(Chunk chunk, ChunkMetadata chunkMetadata) throws IOException {
+ private void flushChunkToFileWriter(
+ Chunk chunk, ChunkMetadata chunkMetadata, boolean isCachedChunk) throws IOException {
CompactionTaskManager.mergeRateLimiterAcquire(compactionRateLimiter, getChunkSize(chunk));
if (chunkMetadata.getStartTime() < minStartTimestamp) {
minStartTimestamp = chunkMetadata.getStartTime();
@@ -257,6 +273,11 @@ private void flushChunkToFileWriter(Chunk chunk, ChunkMetadata chunkMetadata) th
if (chunkMetadata.getEndTime() > maxEndTimestamp) {
maxEndTimestamp = chunkMetadata.getEndTime();
}
+ CompactionMetricsManager.recordWriteInfo(
+ CompactionType.INNER_SEQ_COMPACTION,
+ isCachedChunk ? ProcessChunkType.MERGE_CHUNK : ProcessChunkType.FLUSH_CHUNK,
+ false,
+ getChunkSize(chunk));
fileWriter.writeChunk(chunk, chunkMetadata);
}
@@ -265,6 +286,11 @@ private void flushChunkWriterIfLargeEnough() throws IOException {
|| chunkWriter.estimateMaxSeriesMemSize() >= targetChunkSize) {
CompactionTaskManager.mergeRateLimiterAcquire(
compactionRateLimiter, chunkWriter.estimateMaxSeriesMemSize());
+ CompactionMetricsManager.recordWriteInfo(
+ CompactionType.INNER_SEQ_COMPACTION,
+ ProcessChunkType.DESERIALIZE_CHUNK,
+ false,
+ chunkWriter.estimateMaxSeriesMemSize());
chunkWriter.writeToFileWriter(fileWriter);
pointCountInChunkWriter = 0L;
}
@@ -273,7 +299,7 @@ private void flushChunkWriterIfLargeEnough() throws IOException {
private void flushCachedChunkIfLargeEnough() throws IOException {
if (cachedChunk.getChunkStatistic().getCount() >= targetChunkPointNum
|| getChunkSize(cachedChunk) >= targetChunkSize) {
- flushChunkToFileWriter(cachedChunk, cachedChunkMetadata);
+ flushChunkToFileWriter(cachedChunk, cachedChunkMetadata, true);
cachedChunk = null;
cachedChunkMetadata = null;
}
@@ -282,6 +308,11 @@ private void flushCachedChunkIfLargeEnough() throws IOException {
private void flushChunkWriter() throws IOException {
CompactionTaskManager.mergeRateLimiterAcquire(
compactionRateLimiter, chunkWriter.estimateMaxSeriesMemSize());
+ CompactionMetricsManager.recordWriteInfo(
+ CompactionType.INNER_SEQ_COMPACTION,
+ ProcessChunkType.DESERIALIZE_CHUNK,
+ false,
+ chunkWriter.estimateMaxSeriesMemSize());
chunkWriter.writeToFileWriter(fileWriter);
pointCountInChunkWriter = 0L;
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/task/AbstractCompactionTask.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/task/AbstractCompactionTask.java
index ce80aff12371c..089e888145270 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/task/AbstractCompactionTask.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/task/AbstractCompactionTask.java
@@ -21,38 +21,38 @@
import org.apache.iotdb.db.conf.IoTDBConstant;
import org.apache.iotdb.db.engine.compaction.CompactionTaskManager;
-import org.apache.iotdb.db.engine.compaction.cross.rewrite.task.RewriteCrossCompactionRecoverTask;
-import org.apache.iotdb.db.engine.compaction.inner.sizetiered.SizeTieredCompactionRecoverTask;
-import org.apache.iotdb.db.service.metrics.Metric;
-import org.apache.iotdb.db.service.metrics.MetricsService;
-import org.apache.iotdb.db.service.metrics.Tag;
-import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
-import org.apache.iotdb.metrics.utils.MetricLevel;
+import org.apache.iotdb.db.engine.storagegroup.TsFileManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.Callable;
-import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
/**
* AbstractCompactionTask is the base class for all compaction task, it carries out the execution of
- * compaction. AbstractCompactionTask uses a template method, it execute the abstract function
- * doCompaction implemented by subclass, and decrease the currentTaskNum in
- * CompactionScheduler when the doCompaction finish.
+ * compaction. AbstractCompactionTask uses a template method, it executes the abstract function
+ * {@link AbstractCompactionTask#doCompaction()} implemented by subclass, and decreases the
+ * currentTaskNum in CompactionScheduler when the {@link AbstractCompactionTask#doCompaction()} is
+ * finished. The future returns the {@link CompactionTaskSummary} of this task execution.
*/
-public abstract class AbstractCompactionTask implements Callable<Void> {
+public abstract class AbstractCompactionTask implements Callable<CompactionTaskSummary> {
private static final Logger LOGGER =
LoggerFactory.getLogger(IoTDBConstant.COMPACTION_LOGGER_NAME);
protected String fullStorageGroupName;
protected long timePartition;
protected final AtomicInteger currentTaskNum;
+ protected final TsFileManager tsFileManager;
+ protected long timeCost = 0L;
public AbstractCompactionTask(
- String fullStorageGroupName, long timePartition, AtomicInteger currentTaskNum) {
+ String fullStorageGroupName,
+ long timePartition,
+ TsFileManager tsFileManager,
+ AtomicInteger currentTaskNum) {
this.fullStorageGroupName = fullStorageGroupName;
this.timePartition = timePartition;
+ this.tsFileManager = tsFileManager;
this.currentTaskNum = currentTaskNum;
}
@@ -61,34 +61,22 @@ public AbstractCompactionTask(
protected abstract void doCompaction() throws Exception;
@Override
- public Void call() throws Exception {
+ public CompactionTaskSummary call() throws Exception {
long startTime = System.currentTimeMillis();
currentTaskNum.incrementAndGet();
+ boolean isSuccess = false;
try {
doCompaction();
+ isSuccess = true;
} catch (Exception e) {
- LOGGER.error(e.getMessage(), e);
+ LOGGER.error("Running compaction task failed", e);
} finally {
- if (!(this instanceof RewriteCrossCompactionRecoverTask)
- && !(this instanceof SizeTieredCompactionRecoverTask)) {
- CompactionTaskManager.getInstance().removeRunningTaskFromList(this);
- }
+ CompactionTaskManager.getInstance().removeRunningTaskFromList(this);
+ timeCost = System.currentTimeMillis() - startTime;
this.currentTaskNum.decrementAndGet();
}
- if (MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric()) {
- MetricsService.getInstance()
- .getMetricManager()
- .timer(
- System.currentTimeMillis() - startTime,
- TimeUnit.MILLISECONDS,
- Metric.COST_TASK.toString(),
- MetricLevel.IMPORTANT,
- Tag.NAME.toString(),
- "compaction");
- }
-
- return null;
+ return new CompactionTaskSummary(isSuccess);
}
public String getFullStorageGroupName() {
@@ -118,4 +106,8 @@ public boolean equals(Object other) {
}
public abstract void resetCompactionCandidateStatusForAllSourceFiles();
+
+ public long getTimeCost() {
+ return timeCost;
+ }
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/CrossSpaceCompactionExceptionHandler.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/task/CompactionExceptionHandler.java
similarity index 62%
rename from server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/CrossSpaceCompactionExceptionHandler.java
rename to server/src/main/java/org/apache/iotdb/db/engine/compaction/task/CompactionExceptionHandler.java
index 3da6de5308709..9ee0c66aa0e81 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/cross/CrossSpaceCompactionExceptionHandler.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/task/CompactionExceptionHandler.java
@@ -17,13 +17,15 @@
* under the License.
*/
-package org.apache.iotdb.db.engine.compaction.cross;
+package org.apache.iotdb.db.engine.compaction.task;
import org.apache.iotdb.db.conf.IoTDBConstant;
+import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.engine.compaction.CompactionUtils;
import org.apache.iotdb.db.engine.storagegroup.TsFileManager;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import org.apache.iotdb.db.engine.storagegroup.TsFileResourceList;
+import org.apache.iotdb.db.engine.storagegroup.TsFileResourceStatus;
import org.apache.iotdb.db.rescon.TsFileResourceManager;
import org.apache.iotdb.tsfile.utils.TsFileUtils;
@@ -36,18 +38,21 @@
import java.util.ArrayList;
import java.util.List;
-public class CrossSpaceCompactionExceptionHandler {
+public class CompactionExceptionHandler {
private static final Logger LOGGER =
LoggerFactory.getLogger(IoTDBConstant.COMPACTION_LOGGER_NAME);
public static void handleException(
- String storageGroup,
+ String fullStorageGroupName,
File logFile,
List<TsFileResource> targetResourceList,
List<TsFileResource> seqResourceList,
List<TsFileResource> unseqResourceList,
TsFileManager tsFileManager,
- long timePartition) {
+ long timePartition,
+ boolean isInnerSpace,
+ boolean isTargetSequence) {
+ String compactionType = isInnerSpace ? "inner" : "cross";
try {
if (logFile == null || !logFile.exists()) {
// the log file is null or the log file does not exists
@@ -56,42 +61,45 @@ public static void handleException(
return;
}
LOGGER.info(
- "[Compaction][ExceptionHandler] Cross space compaction start handling exception, source seqFiles is "
- + seqResourceList
- + ", source unseqFiles is "
- + unseqResourceList);
+ "{} [Compaction][ExceptionHandler] {} space compaction start handling exception, source seqFiles is {}, source unseqFiles is {}.",
+ fullStorageGroupName,
+ compactionType,
+ seqResourceList,
+ unseqResourceList);
boolean handleSuccess = true;
List<TsFileResource> lostSourceFiles = new ArrayList<>();
- boolean allSeqFilesExist = checkAllSourceFileExists(seqResourceList, lostSourceFiles);
- boolean allUnseqFilesExist = checkAllSourceFileExists(unseqResourceList, lostSourceFiles);
+ boolean allSourceSeqFilesExist = checkAllSourceFileExists(seqResourceList, lostSourceFiles);
+ boolean allSourceUnseqFilesExist =
+ checkAllSourceFileExists(unseqResourceList, lostSourceFiles);
- if (allSeqFilesExist && allUnseqFilesExist) {
- // all source files exists, remove target file and recover memory
+ if (allSourceSeqFilesExist && allSourceUnseqFilesExist) {
handleSuccess =
handleWhenAllSourceFilesExist(
- storageGroup,
targetResourceList,
seqResourceList,
unseqResourceList,
tsFileManager,
- timePartition);
+ timePartition,
+ isTargetSequence,
+ fullStorageGroupName);
} else {
handleSuccess =
handleWhenSomeSourceFilesLost(
- storageGroup,
+ targetResourceList,
seqResourceList,
unseqResourceList,
- targetResourceList,
- lostSourceFiles);
+ lostSourceFiles,
+ fullStorageGroupName);
}
if (!handleSuccess) {
LOGGER.error(
- "[Compaction][ExceptionHandler] failed to handle exception, set allowCompaction to false in {}",
- storageGroup);
+ "[Compaction][ExceptionHandler] Fail to handle {} space compaction exception, set allowCompaction to false in {}",
+ compactionType,
+ fullStorageGroupName);
tsFileManager.setAllowCompaction(false);
} else {
FileUtils.delete(logFile);
@@ -100,9 +108,9 @@ public static void handleException(
// catch throwable when handling exception
// set the allowCompaction to false
LOGGER.error(
- "[Compaction][ExceptionHandler] exception occurs when handling exception in cross space compaction."
- + " Set allowCompaction to false in {}",
- storageGroup,
+ "[Compaction][ExceptionHandler] exception occurs when handling exception in {} space compaction. Set allowCompaction to false in {}",
+ compactionType,
+ fullStorageGroupName,
throwable);
tsFileManager.setAllowCompaction(false);
}
@@ -124,12 +132,13 @@ private static boolean checkAllSourceFileExists(
* under OOM errors, we do not check whether the target files are complete.
*/
private static boolean handleWhenAllSourceFilesExist(
- String storageGroup,
- List<TsFileResource> targetTsFiles,
- List<TsFileResource> seqFileList,
- List<TsFileResource> unseqFileList,
+ List<TsFileResource> targetResourceList,
+ List<TsFileResource> sourceSeqResourceList,
+ List<TsFileResource> sourceUnseqResourceList,
TsFileManager tsFileManager,
- long timePartition)
+ long timePartition,
+ boolean isTargetSequence,
+ String fullStorageGroupName)
throws IOException {
TsFileResourceList unseqTsFileResourceList =
tsFileManager.getUnsequenceListByTimePartition(timePartition);
@@ -137,18 +146,18 @@ private static boolean handleWhenAllSourceFilesExist(
tsFileManager.getSequenceListByTimePartition(timePartition);
// delete compaction mods files
- CompactionUtils.deleteCompactionModsFile(seqFileList, unseqFileList);
+ CompactionUtils.deleteCompactionModsFile(sourceSeqResourceList, sourceUnseqResourceList);
boolean removeAllTargetFile = true;
- tsFileManager.writeLock("CrossSpaceCompactionExceptionHandler");
+ tsFileManager.writeLock("CompactionExceptionHandler");
try {
- for (TsFileResource targetTsFile : targetTsFiles) {
- // delete target files
+ for (TsFileResource targetTsFile : targetResourceList) {
+ // delete target file
targetTsFile.writeLock();
if (!targetTsFile.remove()) {
LOGGER.error(
- "{} [Compaction][Exception] failed to delete target tsfile {} when handling exception",
- storageGroup,
+ "{} [Compaction][Exception] fail to delete target tsfile {} when handling exception",
+ fullStorageGroupName,
targetTsFile);
removeAllTargetFile = false;
}
@@ -156,19 +165,23 @@ private static boolean handleWhenAllSourceFilesExist(
// remove target tsfile resource in memory
if (targetTsFile.isFileInList()) {
- seqTsFileResourceList.remove(targetTsFile);
+ if (isTargetSequence) {
+ seqTsFileResourceList.remove(targetTsFile);
+ } else {
+ unseqTsFileResourceList.remove(targetTsFile);
+ }
TsFileResourceManager.getInstance().removeTsFileResource(targetTsFile);
}
}
// recover source tsfile resource in memory
- for (TsFileResource tsFileResource : seqFileList) {
+ for (TsFileResource tsFileResource : sourceSeqResourceList) {
if (!tsFileResource.isFileInList()) {
seqTsFileResourceList.keepOrderInsert(tsFileResource);
TsFileResourceManager.getInstance().registerSealedTsFileResource(tsFileResource);
}
}
- for (TsFileResource tsFileResource : unseqFileList) {
+ for (TsFileResource tsFileResource : sourceUnseqResourceList) {
if (!tsFileResource.isFileInList()) {
unseqTsFileResourceList.keepOrderInsert(tsFileResource);
TsFileResourceManager.getInstance().registerSealedTsFileResource(tsFileResource);
@@ -181,38 +194,40 @@ private static boolean handleWhenAllSourceFilesExist(
}
/**
- * Some source files are lost, check if the compaction has finished. If the compaction has
- * finished, delete the remaining source files and compaction mods files. If the compaction has
- * not finished, set the allowCompaction in tsFileManager to false and print some error logs.
+ * Some source files are lost, check if all target files are complete. If all target files are
+ * complete, delete the remaining source files and compaction mods files. If some target files are
+ * not complete, set the allowCompaction in tsFileManager to false and print some error logs.
*/
- public static boolean handleWhenSomeSourceFilesLost(
- String storageGroup,
- List<TsFileResource> seqFileList,
- List<TsFileResource> unseqFileList,
- List<TsFileResource> targetFileList,
- List<TsFileResource> lostSourceFiles)
+ private static boolean handleWhenSomeSourceFilesLost(
+ List<TsFileResource> targetResourceList,
+ List<TsFileResource> sourceSeqResourceList,
+ List<TsFileResource> sourceUnseqResourceList,
+ List<TsFileResource> lostSourceResourceList,
+ String fullStorageGroupName)
throws IOException {
- if (!checkIsTargetFilesComplete(targetFileList, lostSourceFiles, storageGroup)) {
+ // check whether all target files are complete
+ if (!checkIsTargetFilesComplete(
+ targetResourceList, lostSourceResourceList, fullStorageGroupName)) {
return false;
}
// delete source files
- for (TsFileResource unseqFile : unseqFileList) {
- unseqFile.remove();
- unseqFile.setDeleted(true);
+ for (TsFileResource resource : sourceSeqResourceList) {
+ resource.setStatus(TsFileResourceStatus.DELETED);
+ resource.remove();
}
- for (TsFileResource seqFile : seqFileList) {
- seqFile.remove();
- seqFile.setDeleted(true);
+ for (TsFileResource resource : sourceUnseqResourceList) {
+ resource.setStatus(TsFileResourceStatus.DELETED);
+ resource.remove();
}
// delete compaction mods files
- CompactionUtils.deleteCompactionModsFile(seqFileList, unseqFileList);
+ CompactionUtils.deleteCompactionModsFile(sourceSeqResourceList, sourceUnseqResourceList);
return true;
}
- public static boolean checkIsTargetFilesComplete(
+ private static boolean checkIsTargetFilesComplete(
List<TsFileResource> targetResources,
List<TsFileResource> lostSourceResources,
String fullStorageGroupName)
@@ -224,6 +239,7 @@ public static boolean checkIsTargetFilesComplete(
fullStorageGroupName,
targetResource,
lostSourceResources);
+ IoTDBDescriptor.getInstance().getConfig().setReadOnly(true);
return false;
}
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/task/CompactionRecoverManager.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/task/CompactionRecoverManager.java
new file mode 100644
index 0000000000000..c6b9783f34bc4
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/task/CompactionRecoverManager.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.engine.compaction.task;
+
+import org.apache.iotdb.db.conf.IoTDBConstant;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
+import org.apache.iotdb.db.engine.compaction.utils.log.CompactionLogger;
+import org.apache.iotdb.db.engine.storagegroup.TsFileManager;
+import org.apache.iotdb.tsfile.fileSystem.FSFactoryProducer;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.util.List;
+import java.util.regex.Pattern;
+
+import static org.apache.iotdb.db.engine.compaction.utils.log.CompactionLogger.CROSS_COMPACTION_LOG_NAME_FROM_OLD;
+import static org.apache.iotdb.db.engine.compaction.utils.log.CompactionLogger.INNER_COMPACTION_LOG_NAME_SUFFIX_FROM_OLD;
+
+/**
+ * CompactionRecoverManager searches compaction log and call {@link CompactionRecoverTask} to
+ * execute the recover process for all compaction task sequentially, including InnerCompactionTask
+ * in sequence/unsequence space, CrossSpaceCompaction.
+ */
+public class CompactionRecoverManager {
+ private static final Logger logger =
+ LoggerFactory.getLogger(IoTDBConstant.COMPACTION_LOGGER_NAME);
+ private final TsFileManager tsFileManager;
+ private final String logicalStorageGroupName;
+ private final String virtualStorageGroupId;
+
+ public CompactionRecoverManager(
+ TsFileManager tsFileManager, String logicalStorageGroupName, String virtualStorageGroupId) {
+ this.tsFileManager = tsFileManager;
+ this.logicalStorageGroupName = logicalStorageGroupName;
+ this.virtualStorageGroupId = virtualStorageGroupId;
+ }
+
+ public void recoverInnerSpaceCompaction(boolean isSequence) {
+ logger.info("recovering inner compaction");
+ recoverCompactionBefore013(true);
+ recoverCompaction(true, isSequence);
+ }
+
+ public void recoverCrossSpaceCompaction() {
+ logger.info("recovering cross compaction");
+ recoverCompactionBefore013(false);
+ recoverCompaction(false, true);
+ }
+
+ private void recoverCompaction(boolean isInnerSpace, boolean isLogSequence) {
+ List<String> dirs;
+ if (isLogSequence) {
+ dirs = DirectoryManager.getInstance().getAllSequenceFileFolders();
+ } else {
+ dirs = DirectoryManager.getInstance().getAllUnSequenceFileFolders();
+ }
+ for (String dir : dirs) {
+ File storageGroupDir =
+ new File(
+ dir
+ + File.separator
+ + logicalStorageGroupName
+ + File.separator
+ + virtualStorageGroupId);
+ if (!storageGroupDir.exists()) {
+ return;
+ }
+ File[] timePartitionDirs = storageGroupDir.listFiles();
+ if (timePartitionDirs == null) {
+ return;
+ }
+ for (File timePartitionDir : timePartitionDirs) {
+ if (!timePartitionDir.isDirectory()
+ || !Pattern.compile("[0-9]*").matcher(timePartitionDir.getName()).matches()) {
+ continue;
+ }
+ File[] compactionLogs =
+ CompactionLogger.findCompactionLogs(isInnerSpace, timePartitionDir.getPath());
+ for (File compactionLog : compactionLogs) {
+ logger.info("Calling compaction recover task.");
+ new CompactionRecoverTask(
+ logicalStorageGroupName,
+ virtualStorageGroupId,
+ tsFileManager,
+ compactionLog,
+ isInnerSpace)
+ .doCompaction();
+ }
+ }
+ }
+ }
+
+ /** Check whether there is old compaction log from previous version (<0.13) and recover it. */
+ private void recoverCompactionBefore013(boolean isInnerSpace) {
+ String oldLogName =
+ isInnerSpace
+ ? logicalStorageGroupName + INNER_COMPACTION_LOG_NAME_SUFFIX_FROM_OLD
+ : CROSS_COMPACTION_LOG_NAME_FROM_OLD;
+ File logFileFromOld =
+ FSFactoryProducer.getFSFactory().getFile(tsFileManager.getStorageGroupDir(), oldLogName);
+
+ if (logFileFromOld.exists()) {
+ logger.info("Calling compaction task to recover from previous version.");
+ new CompactionRecoverTask(
+ logicalStorageGroupName,
+ virtualStorageGroupId,
+ tsFileManager,
+ logFileFromOld,
+ isInnerSpace)
+ .doCompaction();
+ }
+ }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/task/CompactionRecoverTask.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/task/CompactionRecoverTask.java
index f4d19bf691fa6..af306c11273b0 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/task/CompactionRecoverTask.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/task/CompactionRecoverTask.java
@@ -20,102 +20,479 @@
import org.apache.iotdb.db.conf.IoTDBConstant;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.conf.directories.DirectoryManager;
+import org.apache.iotdb.db.engine.compaction.CompactionUtils;
+import org.apache.iotdb.db.engine.compaction.TsFileIdentifier;
+import org.apache.iotdb.db.engine.compaction.utils.log.CompactionLogAnalyzer;
import org.apache.iotdb.db.engine.compaction.utils.log.CompactionLogger;
+import org.apache.iotdb.db.engine.modification.Modification;
+import org.apache.iotdb.db.engine.modification.ModificationFile;
import org.apache.iotdb.db.engine.storagegroup.TsFileManager;
+import org.apache.iotdb.db.engine.storagegroup.TsFileNameGenerator;
+import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+import org.apache.iotdb.db.utils.FileLoaderUtils;
+import org.apache.iotdb.tsfile.common.constant.TsFileConstant;
+import org.apache.iotdb.tsfile.fileSystem.FSFactoryProducer;
+import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
+import org.apache.iotdb.tsfile.utils.TsFileUtils;
+import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
-import java.util.regex.Pattern;
-/**
- * CompactionRecoverTask execute the recover process for all compaction task sequentially, including
- * InnerCompactionTask in sequence/unsequence space, CrossSpaceCompaction.
- */
+/** CompactionRecoverTask executes the recover process for all compaction tasks. */
public class CompactionRecoverTask {
- private static final Logger logger =
- LoggerFactory.getLogger(IoTDBConstant.COMPACTION_LOGGER_NAME);
- private TsFileManager tsFileManager;
- private String logicalStorageGroupName;
- private String virtualStorageGroupId;
+ private final Logger LOGGER = LoggerFactory.getLogger(IoTDBConstant.COMPACTION_LOGGER_NAME);
+ private final File compactionLogFile;
+ private final boolean isInnerSpace;
+ private final String fullStorageGroupName;
+ private final TsFileManager tsFileManager;
public CompactionRecoverTask(
- TsFileManager tsFileManager, String logicalStorageGroupName, String virtualStorageGroupId) {
+ String logicalStorageGroupName,
+ String virtualStorageGroupName,
+ TsFileManager tsFileManager,
+ File logFile,
+ boolean isInnerSpace) {
+ this.compactionLogFile = logFile;
+ this.isInnerSpace = isInnerSpace;
+ this.fullStorageGroupName = logicalStorageGroupName + "-" + virtualStorageGroupName;
this.tsFileManager = tsFileManager;
- this.logicalStorageGroupName = logicalStorageGroupName;
- this.virtualStorageGroupId = virtualStorageGroupId;
}
- public void recoverCrossSpaceCompaction() throws Exception {
- logger.info("recovering cross compaction");
- recoverCrossCompactionFromOldVersion();
- recoverCrossCompaction();
- logger.info("try to synchronize CompactionScheduler");
+ public void doCompaction() {
+ boolean recoverSuccess = true;
+ LOGGER.info(
+ "{} [Compaction][Recover] compaction log is {}", fullStorageGroupName, compactionLogFile);
+ try {
+ if (compactionLogFile.exists()) {
+ LOGGER.info(
+ "{} [Compaction][Recover] compaction log file {} exists, start to recover it",
+ fullStorageGroupName,
+ compactionLogFile);
+ CompactionLogAnalyzer logAnalyzer = new CompactionLogAnalyzer(compactionLogFile);
+ CompactionRecoverFromOld compactionRecoverFromOld = new CompactionRecoverFromOld();
+ if (isInnerSpace && compactionRecoverFromOld.isInnerCompactionLogBefore013()) {
+ // inner compaction log from previous version (<0.13)
+ logAnalyzer.analyzeOldInnerCompactionLog();
+ } else if (!isInnerSpace && compactionRecoverFromOld.isCrossCompactionLogBefore013()) {
+ // cross compaction log from previous version (<0.13)
+ logAnalyzer.analyzeOldCrossCompactionLog();
+ } else {
+ logAnalyzer.analyze();
+ }
+ List<TsFileIdentifier> sourceFileIdentifiers = logAnalyzer.getSourceFileInfos();
+ List<TsFileIdentifier> targetFileIdentifiers = logAnalyzer.getTargetFileInfos();
+
+ // compaction log file is incomplete
+ if (targetFileIdentifiers.isEmpty() || sourceFileIdentifiers.isEmpty()) {
+ LOGGER.info(
+ "{} [Compaction][Recover] incomplete log file, abort recover", fullStorageGroupName);
+ return;
+ }
+
+ // check is all source files existed
+ boolean isAllSourcesFileExisted = true;
+ for (TsFileIdentifier sourceFileIdentifier : sourceFileIdentifiers) {
+ File sourceFile = sourceFileIdentifier.getFileFromDataDirs();
+ if (sourceFile == null) {
+ isAllSourcesFileExisted = false;
+ break;
+ }
+ }
+
+ if (isAllSourcesFileExisted) {
+ if (!isInnerSpace && logAnalyzer.isLogFromOld()) {
+ recoverSuccess =
+ compactionRecoverFromOld.handleCrossCompactionWithAllSourceFilesExistBefore013(
+ targetFileIdentifiers);
+ } else {
+ recoverSuccess =
+ handleWithAllSourceFilesExist(targetFileIdentifiers, sourceFileIdentifiers);
+ }
+ } else {
+ if (!isInnerSpace && logAnalyzer.isLogFromOld()) {
+ recoverSuccess =
+ compactionRecoverFromOld.handleCrossCompactionWithSomeSourceFilesLostBefore013(
+ targetFileIdentifiers, sourceFileIdentifiers);
+ } else {
+ recoverSuccess =
+ handleWithSomeSourceFilesLost(targetFileIdentifiers, sourceFileIdentifiers);
+ }
+ }
+ }
+ } catch (IOException e) {
+ LOGGER.error("Recover compaction error", e);
+ } finally {
+ if (!recoverSuccess) {
+ LOGGER.error(
+ "{} [Compaction][Recover] Failed to recover compaction, set allowCompaction to false",
+ fullStorageGroupName);
+ tsFileManager.setAllowCompaction(false);
+ } else {
+ if (compactionLogFile.exists()) {
+ try {
+ LOGGER.info(
+ "{} [Compaction][Recover] Recover compaction successfully, delete log file {}",
+ fullStorageGroupName,
+ compactionLogFile);
+ FileUtils.delete(compactionLogFile);
+ } catch (IOException e) {
+ LOGGER.error(
+ "{} [Compaction][Recover] Exception occurs while deleting log file {}, set allowCompaction to false",
+ fullStorageGroupName,
+ compactionLogFile,
+ e);
+ tsFileManager.setAllowCompaction(false);
+ }
+ }
+ }
+ }
}
- private void recoverCrossCompaction() throws Exception {
- List<String> sequenceDirs = DirectoryManager.getInstance().getAllSequenceFileFolders();
- for (String dir : sequenceDirs) {
- File storageGroupDir =
- new File(
- dir
- + File.separator
- + logicalStorageGroupName
- + File.separator
- + virtualStorageGroupId);
- if (!storageGroupDir.exists()) {
- return;
+ /**
+ * All source files exist: (1) delete all the target files and tmp target files (2) delete
+ * compaction mods files.
+ */
+ private boolean handleWithAllSourceFilesExist(
+ List<TsFileIdentifier> targetFileIdentifiers, List<TsFileIdentifier> sourceFileIdentifiers) {
+ LOGGER.info(
+ "{} [Compaction][Recover] all source files exists, delete all target files.",
+ fullStorageGroupName);
+
+ // remove tmp target files and target files
+ for (TsFileIdentifier targetFileIdentifier : targetFileIdentifiers) {
+ // xxx.inner or xxx.cross
+ File tmpTargetFile = targetFileIdentifier.getFileFromDataDirs();
+ // xxx.tsfile
+ File targetFile =
+ getFileFromDataDirs(
+ targetFileIdentifier
+ .getFilePath()
+ .replace(
+ isInnerSpace
+ ? IoTDBConstant.INNER_COMPACTION_TMP_FILE_SUFFIX
+ : IoTDBConstant.CROSS_COMPACTION_TMP_FILE_SUFFIX,
+ TsFileConstant.TSFILE_SUFFIX));
+ TsFileResource targetResource = null;
+ if (tmpTargetFile != null) {
+ targetResource = new TsFileResource(tmpTargetFile);
+ } else if (targetFile != null) {
+ targetResource = new TsFileResource(targetFile);
}
- File[] timePartitionDirs = storageGroupDir.listFiles();
- if (timePartitionDirs == null) {
- return;
+
+ if (targetResource != null && !targetResource.remove()) {
+ // failed to remove tmp target tsfile
+ // system should not carry out the subsequent compaction in case of data redundant
+ LOGGER.error(
+ "{} [Compaction][Recover] failed to remove target file {}",
+ fullStorageGroupName,
+ targetResource);
+ return false;
}
- for (File timePartitionDir : timePartitionDirs) {
- if (!timePartitionDir.isDirectory()
- || !Pattern.compile("[0-9]*").matcher(timePartitionDir.getName()).matches()) {
- continue;
+ }
+
+ // delete compaction mods files
+ List sourceTsFileResourceList = new ArrayList<>();
+ for (TsFileIdentifier sourceFileIdentifier : sourceFileIdentifiers) {
+ sourceTsFileResourceList.add(new TsFileResource(sourceFileIdentifier.getFileFromDataDirs()));
+ }
+ try {
+ CompactionUtils.deleteCompactionModsFile(sourceTsFileResourceList, Collections.emptyList());
+ } catch (Throwable e) {
+ LOGGER.error(
+ "{} [Compaction][Recover] Exception occurs while deleting compaction mods file, set allowCompaction to false",
+ fullStorageGroupName,
+ e);
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * Some source files lost: delete remaining source files, including: tsfile, resource file, mods
+ * file and compaction mods file.
+ */
+ private boolean handleWithSomeSourceFilesLost(
+ List targetFileIdentifiers, List sourceFileIdentifiers)
+ throws IOException {
+    // some source files have been deleted, while the target file must exist and be complete.
+ if (!checkIsTargetFilesComplete(targetFileIdentifiers)) {
+ return false;
+ }
+
+ boolean handleSuccess = true;
+ for (TsFileIdentifier sourceFileIdentifier : sourceFileIdentifiers) {
+ File sourceFile = sourceFileIdentifier.getFileFromDataDirs();
+ if (sourceFile != null) {
+ // delete source tsfile, resource file and mods file
+ if (!new TsFileResource(sourceFile).remove()) {
+ LOGGER.error(
+ "{} [Compaction][Recover] fail to delete remaining source file {}.",
+ fullStorageGroupName,
+ sourceFile);
+ handleSuccess = false;
+ }
+ } else {
+ // if source file does not exist, its resource file may still exist, so delete it.
+ File resourceFile =
+ getFileFromDataDirs(
+ sourceFileIdentifier.getFilePath() + TsFileResource.RESOURCE_SUFFIX);
+
+ if (!checkAndDeleteFile(resourceFile)) {
+ handleSuccess = false;
}
- File[] compactionLogs =
- CompactionLogger.findCrossSpaceCompactionLogs(timePartitionDir.getPath());
- for (File compactionLog : compactionLogs) {
- logger.info("calling cross compaction task");
- IoTDBDescriptor.getInstance()
- .getConfig()
- .getCrossCompactionStrategy()
- .getCompactionRecoverTask(
- logicalStorageGroupName,
- virtualStorageGroupId,
- Long.parseLong(timePartitionDir.getName()),
- compactionLog,
- tsFileManager)
- .call();
+
+ // delete .mods file of source tsfile
+ File modFile =
+ getFileFromDataDirs(sourceFileIdentifier.getFilePath() + ModificationFile.FILE_SUFFIX);
+ if (!checkAndDeleteFile(modFile)) {
+ handleSuccess = false;
}
}
+
+ // delete .compaction.mods file of all source files
+ File compactionModFile =
+ getFileFromDataDirs(
+ sourceFileIdentifier.getFilePath() + ModificationFile.COMPACTION_FILE_SUFFIX);
+ if (!checkAndDeleteFile(compactionModFile)) {
+ handleSuccess = false;
+ }
+ }
+ return handleSuccess;
+ }
+
+ /**
+   * This method finds the File object of the given filePath by searching every data directory. If
+ * the file is not found, it will return null.
+ */
+ private File getFileFromDataDirs(String filePath) {
+ String[] dataDirs = IoTDBDescriptor.getInstance().getConfig().getDataDirs();
+ for (String dataDir : dataDirs) {
+ File f = new File(dataDir, filePath);
+ if (f.exists()) {
+ return f;
+ }
+ }
+ return null;
+ }
+
+ private boolean checkIsTargetFilesComplete(List targetFileIdentifiers)
+ throws IOException {
+ for (TsFileIdentifier targetFileIdentifier : targetFileIdentifiers) {
+ // xxx.tsfile
+ File targetFile =
+ getFileFromDataDirs(
+ targetFileIdentifier
+ .getFilePath()
+ .replace(
+ isInnerSpace
+ ? IoTDBConstant.INNER_COMPACTION_TMP_FILE_SUFFIX
+ : IoTDBConstant.CROSS_COMPACTION_TMP_FILE_SUFFIX,
+ TsFileConstant.TSFILE_SUFFIX));
+ if (targetFile == null
+ || !TsFileUtils.isTsFileComplete(new TsFileResource(targetFile).getTsFile())) {
+ LOGGER.error(
+ "{} [Compaction][ExceptionHandler] target file {} is not complete, and some source files is lost, do nothing. Set allowCompaction to false",
+ fullStorageGroupName,
+ targetFileIdentifier.getFilePath());
+ IoTDBDescriptor.getInstance().getConfig().setReadOnly(true);
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+   * Return true if the file does not exist, or if it exists and has been deleted
+   * successfully. Otherwise, return false.
+ */
+ private boolean checkAndDeleteFile(File file) {
+ if ((file != null && file.exists()) && !file.delete()) {
+ LOGGER.error("{} [Compaction][Recover] failed to remove file {}", fullStorageGroupName, file);
+ return false;
}
+ return true;
}
- private void recoverCrossCompactionFromOldVersion() throws Exception {
- // check whether there is old compaction log from previous version (<0.13)
- File mergeLogFromOldVersion =
- new File(
- tsFileManager.getStorageGroupDir()
- + File.separator
- + CompactionLogger.CROSS_COMPACTION_LOG_NAME_FROM_OLD);
- if (mergeLogFromOldVersion.exists()) {
- logger.info("calling cross compaction task to recover from previous version.");
- IoTDBDescriptor.getInstance()
- .getConfig()
- .getCrossCompactionStrategy()
- .getCompactionRecoverTask(
- logicalStorageGroupName,
- virtualStorageGroupId,
- 0L,
- mergeLogFromOldVersion,
- tsFileManager)
- .call();
+ /**
+   * Used to check whether the recovery is from the last version (<0.13) and perform the
+   * corresponding process.
+ */
+ private class CompactionRecoverFromOld {
+
+ /** Return whether cross compaction log file is from previous version (<0.13). */
+ private boolean isCrossCompactionLogBefore013() {
+ return compactionLogFile
+ .getName()
+ .equals(CompactionLogger.CROSS_COMPACTION_LOG_NAME_FROM_OLD);
+ }
+
+ /** Return whether inner compaction log file is from previous version (<0.13). */
+ private boolean isInnerCompactionLogBefore013() {
+ return compactionLogFile.getName().startsWith(tsFileManager.getStorageGroupName());
+ }
+
+ /** Delete tmp target file and compaction mods file. */
+ private boolean handleCrossCompactionWithAllSourceFilesExistBefore013(
+ List targetFileIdentifiers) {
+ // delete tmp target file
+ for (TsFileIdentifier targetFileIdentifier : targetFileIdentifiers) {
+ // xxx.tsfile.merge
+ File tmpTargetFile = targetFileIdentifier.getFileFromDataDirs();
+ if (tmpTargetFile != null) {
+ tmpTargetFile.delete();
+ }
+ }
+
+ // delete compaction mods file
+ File compactionModsFileFromOld =
+ new File(
+ tsFileManager.getStorageGroupDir()
+ + File.separator
+ + IoTDBConstant.COMPACTION_MODIFICATION_FILE_NAME_FROM_OLD);
+ if (!checkAndDeleteFile(compactionModsFileFromOld)) {
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * 1. If target file does not exist, then move .merge file to target file
+ * 2. If target resource file does not exist, then serialize it.
+ * 3. Append merging modification to target mods file and delete merging mods file.
+ * 4. Delete source files and .merge file.
+ */
+ private boolean handleCrossCompactionWithSomeSourceFilesLostBefore013(
+ List targetFileIdentifiers,
+ List sourceFileIdentifiers) {
+ try {
+ File compactionModsFileFromOld =
+ new File(
+ tsFileManager.getStorageGroupDir()
+ + File.separator
+ + IoTDBConstant.COMPACTION_MODIFICATION_FILE_NAME_FROM_OLD);
+ List targetFileResources = new ArrayList<>();
+ for (int i = 0; i < sourceFileIdentifiers.size(); i++) {
+ TsFileIdentifier sourceFileIdentifier = sourceFileIdentifiers.get(i);
+ if (sourceFileIdentifier.isSequence()) {
+ File tmpTargetFile = targetFileIdentifiers.get(i).getFileFromDataDirs();
+ File targetFile = null;
+
+ // move tmp target file to target file if not exist
+ if (tmpTargetFile != null) {
+ // move tmp target file to target file
+ String sourceFilePath =
+ tmpTargetFile
+ .getPath()
+ .replace(
+ TsFileConstant.TSFILE_SUFFIX
+ + IoTDBConstant.CROSS_COMPACTION_TMP_FILE_SUFFIX_FROM_OLD,
+ TsFileConstant.TSFILE_SUFFIX);
+ targetFile = TsFileNameGenerator.increaseCrossCompactionCnt(new File(sourceFilePath));
+ FSFactoryProducer.getFSFactory().moveFile(tmpTargetFile, targetFile);
+ } else {
+ // target file must exist
+ File file =
+ TsFileNameGenerator.increaseCrossCompactionCnt(
+ new File(
+ targetFileIdentifiers
+ .get(i)
+ .getFilePath()
+ .replace(
+ TsFileConstant.TSFILE_SUFFIX
+ + IoTDBConstant.CROSS_COMPACTION_TMP_FILE_SUFFIX_FROM_OLD,
+ TsFileConstant.TSFILE_SUFFIX)));
+
+ targetFile = getFileFromDataDirs(file.getPath());
+ }
+ if (targetFile == null) {
+ LOGGER.error(
+ "{} [Compaction][Recover] target file of source seq file {} does not exist (<0.13).",
+ fullStorageGroupName,
+ sourceFileIdentifier.getFilePath());
+ return false;
+ }
+
+ // serialize target resource file if not exist
+ TsFileResource targetResource = new TsFileResource(targetFile);
+ if (!targetResource.resourceFileExists()) {
+ try (TsFileSequenceReader reader =
+ new TsFileSequenceReader(targetFile.getAbsolutePath())) {
+ FileLoaderUtils.updateTsFileResource(reader, targetResource);
+ }
+ targetResource.serialize();
+ }
+
+ targetFileResources.add(targetResource);
+
+ // append compaction modifications to target mods file and delete compaction mods file
+ if (compactionModsFileFromOld.exists()) {
+ ModificationFile compactionModsFile =
+ new ModificationFile(compactionModsFileFromOld.getPath());
+ appendCompactionModificationsBefore013(targetResource, compactionModsFile);
+ }
+
+ // delete tmp target file
+ if (!checkAndDeleteFile(tmpTargetFile)) {
+ return false;
+ }
+ }
+
+ // delete source tsfile
+ File sourceFile = sourceFileIdentifier.getFileFromDataDirs();
+ if (!checkAndDeleteFile(sourceFile)) {
+ return false;
+ }
+
+ // delete source resource file
+ sourceFile =
+ getFileFromDataDirs(
+ sourceFileIdentifier.getFilePath() + TsFileResource.RESOURCE_SUFFIX);
+ if (!checkAndDeleteFile(sourceFile)) {
+ return false;
+ }
+
+ // delete source mods file
+ sourceFile =
+ getFileFromDataDirs(
+ sourceFileIdentifier.getFilePath() + ModificationFile.FILE_SUFFIX);
+ if (!checkAndDeleteFile(sourceFile)) {
+ return false;
+ }
+ }
+
+ // delete compaction mods file
+ if (!checkAndDeleteFile(compactionModsFileFromOld)) {
+ return false;
+ }
+ } catch (Throwable e) {
+ LOGGER.error(
+ "{} [Compaction][Recover] fail to handle with some source files lost from old version.",
+ fullStorageGroupName,
+ e);
+ return false;
+ }
+
+ return true;
+ }
+
+ private void appendCompactionModificationsBefore013(
+ TsFileResource resource, ModificationFile compactionModsFile) throws IOException {
+ if (compactionModsFile != null) {
+ for (Modification modification : compactionModsFile.getModifications()) {
+ // we have to set modification offset to MAX_VALUE, as the offset of source chunk may
+ // change after compaction
+ modification.setFileOffset(Long.MAX_VALUE);
+ resource.getModFile().write(modification);
+ }
+ resource.getModFile().close();
+ }
}
}
}
diff --git a/server/src/main/java/org/apache/iotdb/db/service/metrics/Metric.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/task/CompactionTaskSummary.java
similarity index 70%
rename from server/src/main/java/org/apache/iotdb/db/service/metrics/Metric.java
rename to server/src/main/java/org/apache/iotdb/db/engine/compaction/task/CompactionTaskSummary.java
index 959a675b31839..a7380969ff49f 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/metrics/Metric.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/task/CompactionTaskSummary.java
@@ -16,26 +16,17 @@
* specific language governing permissions and limitations
* under the License.
*/
+package org.apache.iotdb.db.engine.compaction.task;
-package org.apache.iotdb.db.service.metrics;
+/** The summary of one {@link AbstractCompactionTask} execution */
+public class CompactionTaskSummary {
+ private final boolean success;
-public enum Metric {
- ENTRY,
- COST_TASK,
- QUEUE,
- FILE_SIZE,
- FILE_COUNT,
- MEM,
- CACHE_HIT,
- ERROR_LOG,
- QUANTITY,
- CLUSTER_NODE_STATUS,
- CLUSTER_NODE_LEADER_COUNT,
- CLUSTER_ELECT,
- CLUSTER_UNCOMMITTED_LOG;
+ public CompactionTaskSummary(boolean success) {
+ this.success = success;
+ }
- @Override
- public String toString() {
- return super.toString().toLowerCase();
+ public boolean isSuccess() {
+ return success;
}
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/utils/log/CompactionLogger.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/utils/log/CompactionLogger.java
index 08e74398d1f40..c22f82ddf3bcd 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/utils/log/CompactionLogger.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/utils/log/CompactionLogger.java
@@ -71,11 +71,12 @@ public void logFiles(List tsFiles, String flag) throws IOExcepti
logStream.flush();
}
- public static File[] findCrossSpaceCompactionLogs(String directory) {
+ public static File[] findCompactionLogs(boolean isInnerSpace, String directory) {
+ String compactionLogSuffix =
+ isInnerSpace ? INNER_COMPACTION_LOG_NAME_SUFFIX : CROSS_COMPACTION_LOG_NAME_SUFFIX;
File timePartitionDir = new File(directory);
if (timePartitionDir.exists()) {
- return timePartitionDir.listFiles(
- (dir, name) -> name.endsWith(CROSS_COMPACTION_LOG_NAME_SUFFIX));
+ return timePartitionDir.listFiles((dir, name) -> name.endsWith(compactionLogSuffix));
} else {
return new File[0];
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/writer/AbstractCompactionWriter.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/writer/AbstractCompactionWriter.java
index 7bbb4c6f86a31..5c1460230dc0a 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/writer/AbstractCompactionWriter.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/writer/AbstractCompactionWriter.java
@@ -19,8 +19,11 @@
package org.apache.iotdb.db.engine.compaction.writer;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.engine.compaction.CompactionMetricsManager;
import org.apache.iotdb.db.engine.compaction.CompactionTaskManager;
-import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+import org.apache.iotdb.db.engine.compaction.constant.CompactionType;
+import org.apache.iotdb.db.engine.compaction.constant.ProcessChunkType;
+import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.utils.Binary;
import org.apache.iotdb.tsfile.utils.TsPrimitiveType;
@@ -34,34 +37,41 @@
import java.util.List;
public abstract class AbstractCompactionWriter implements AutoCloseable {
+ protected static final int subTaskNum =
+ IoTDBDescriptor.getInstance().getConfig().getSubCompactionTaskNum();
- protected IChunkWriter chunkWriter;
+ // Each sub task has its own chunk writer.
+ // The index of the array corresponds to subTaskId.
+ protected IChunkWriter[] chunkWriters = new IChunkWriter[subTaskNum];
protected boolean isAlign;
protected String deviceId;
private final long targetChunkSize =
IoTDBDescriptor.getInstance().getConfig().getTargetChunkSize();
+ private final boolean enableMetrics =
+ MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric();
- // point count in current measurment, which is used to check size
- private int measurementPointCount;
+  // Each sub task has a point count in the current measurement, which is used to check size.
+ // The index of the array corresponds to subTaskId.
+ protected int[] measurementPointCountArray = new int[subTaskNum];
public abstract void startChunkGroup(String deviceId, boolean isAlign) throws IOException;
public abstract void endChunkGroup() throws IOException;
- public void startMeasurement(List measurementSchemaList) {
- measurementPointCount = 0;
+ public void startMeasurement(List measurementSchemaList, int subTaskId) {
+ measurementPointCountArray[subTaskId] = 0;
if (isAlign) {
- chunkWriter = new AlignedChunkWriterImpl(measurementSchemaList);
+ chunkWriters[subTaskId] = new AlignedChunkWriterImpl(measurementSchemaList);
} else {
- chunkWriter = new ChunkWriterImpl(measurementSchemaList.get(0), true);
+ chunkWriters[subTaskId] = new ChunkWriterImpl(measurementSchemaList.get(0), true);
}
}
- public abstract void endMeasurement() throws IOException;
+ public abstract void endMeasurement(int subTaskId) throws IOException;
- public abstract void write(long timestamp, Object value) throws IOException;
+ public abstract void write(long timestamp, Object value, int subTaskId) throws IOException;
public abstract void write(long[] timestamps, Object values);
@@ -69,9 +79,9 @@ public void startMeasurement(List measurementSchemaList) {
public abstract void close() throws IOException;
- protected void writeDataPoint(Long timestamp, Object value) {
+ protected void writeDataPoint(Long timestamp, Object value, int subTaskId) {
if (!isAlign) {
- ChunkWriterImpl chunkWriter = (ChunkWriterImpl) this.chunkWriter;
+ ChunkWriterImpl chunkWriter = (ChunkWriterImpl) this.chunkWriters[subTaskId];
switch (chunkWriter.getDataType()) {
case TEXT:
chunkWriter.write(timestamp, (Binary) value);
@@ -95,7 +105,7 @@ protected void writeDataPoint(Long timestamp, Object value) {
throw new UnsupportedOperationException("Unknown data type " + chunkWriter.getDataType());
}
} else {
- AlignedChunkWriterImpl chunkWriter = (AlignedChunkWriterImpl) this.chunkWriter;
+ AlignedChunkWriterImpl chunkWriter = (AlignedChunkWriterImpl) this.chunkWriters[subTaskId];
for (TsPrimitiveType val : (TsPrimitiveType[]) value) {
if (val == null) {
chunkWriter.write(timestamp, null, true);
@@ -127,21 +137,37 @@ protected void writeDataPoint(Long timestamp, Object value) {
}
chunkWriter.write(timestamp);
}
- measurementPointCount++;
+ measurementPointCountArray[subTaskId] += 1;
}
- protected void checkChunkSizeAndMayOpenANewChunk(TsFileIOWriter fileWriter) throws IOException {
- if (measurementPointCount % 10 == 0 && checkChunkSize()) {
- writeRateLimit(chunkWriter.estimateMaxSeriesMemSize());
- chunkWriter.writeToFileWriter(fileWriter);
+ protected void flushChunkToFileWriter(TsFileIOWriter targetWriter, int subTaskId)
+ throws IOException {
+ writeRateLimit(chunkWriters[subTaskId].estimateMaxSeriesMemSize());
+ synchronized (targetWriter) {
+ chunkWriters[subTaskId].writeToFileWriter(targetWriter);
}
}
- private boolean checkChunkSize() {
- if (chunkWriter instanceof AlignedChunkWriterImpl) {
- return ((AlignedChunkWriterImpl) chunkWriter).checkIsChunkSizeOverThreshold(targetChunkSize);
+ protected void checkChunkSizeAndMayOpenANewChunk(TsFileIOWriter fileWriter, int subTaskId)
+ throws IOException {
+ if (measurementPointCountArray[subTaskId] % 10 == 0 && checkChunkSize(subTaskId)) {
+ flushChunkToFileWriter(fileWriter, subTaskId);
+ CompactionMetricsManager.recordWriteInfo(
+ this instanceof CrossSpaceCompactionWriter
+ ? CompactionType.CROSS_COMPACTION
+ : CompactionType.INNER_UNSEQ_COMPACTION,
+ ProcessChunkType.DESERIALIZE_CHUNK,
+ this.isAlign,
+ chunkWriters[subTaskId].estimateMaxSeriesMemSize());
+ }
+ }
+
+ protected boolean checkChunkSize(int subTaskId) {
+ if (chunkWriters[subTaskId] instanceof AlignedChunkWriterImpl) {
+ return ((AlignedChunkWriterImpl) chunkWriters[subTaskId])
+ .checkIsChunkSizeOverThreshold(targetChunkSize);
} else {
- return chunkWriter.estimateMaxSeriesMemSize() > targetChunkSize;
+ return chunkWriters[subTaskId].estimateMaxSeriesMemSize() > targetChunkSize;
}
}
@@ -150,8 +176,5 @@ protected void writeRateLimit(long bytesLength) {
CompactionTaskManager.getInstance().getMergeWriteRateLimiter(), bytesLength);
}
- protected void updateDeviceStartAndEndTime(TsFileResource targetResource, long timestamp) {
- targetResource.updateStartTime(deviceId, timestamp);
- targetResource.updateEndTime(deviceId, timestamp);
- }
+ public abstract List getFileIOWriter();
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/writer/CrossSpaceCompactionWriter.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/writer/CrossSpaceCompactionWriter.java
index 2ebe22581d7ce..3e245cfc35a13 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/writer/CrossSpaceCompactionWriter.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/writer/CrossSpaceCompactionWriter.java
@@ -21,7 +21,6 @@
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import org.apache.iotdb.db.query.control.FileReaderManager;
import org.apache.iotdb.tsfile.file.metadata.TimeseriesMetadata;
-import org.apache.iotdb.tsfile.write.writer.RestorableTsFileIOWriter;
import org.apache.iotdb.tsfile.write.writer.TsFileIOWriter;
import java.io.IOException;
@@ -36,68 +35,73 @@ public class CrossSpaceCompactionWriter extends AbstractCompactionWriter {
// source tsfiles
private List seqTsFileResources;
- private int seqFileIndex;
+ // Each sub task has its corresponding seq file index.
+ // The index of the array corresponds to subTaskId.
+ private int[] seqFileIndexArray = new int[subTaskNum];
+ // device end time in each source seq file
private final long[] currentDeviceEndTime;
+ // whether each target file is empty or not
private final boolean[] isEmptyFile;
- private final boolean[] hasTargetFileStartChunkGroup;
+ // whether each target file has device data or not
+ private final boolean[] isDeviceExistedInTargetFiles;
- private final List targetTsFileResources;
+ // current chunk group header size
+ private int chunkGroupHeaderSize;
public CrossSpaceCompactionWriter(
List targetResources, List seqFileResources)
throws IOException {
currentDeviceEndTime = new long[seqFileResources.size()];
isEmptyFile = new boolean[seqFileResources.size()];
- hasTargetFileStartChunkGroup = new boolean[seqFileResources.size()];
+ isDeviceExistedInTargetFiles = new boolean[targetResources.size()];
for (int i = 0; i < targetResources.size(); i++) {
- this.fileWriterList.add(new RestorableTsFileIOWriter(targetResources.get(i).getTsFile()));
+ this.fileWriterList.add(new TsFileIOWriter(targetResources.get(i).getTsFile()));
isEmptyFile[i] = true;
}
this.seqTsFileResources = seqFileResources;
- this.targetTsFileResources = targetResources;
- seqFileIndex = 0;
}
@Override
public void startChunkGroup(String deviceId, boolean isAlign) throws IOException {
this.deviceId = deviceId;
this.isAlign = isAlign;
- this.seqFileIndex = 0;
+ this.seqFileIndexArray = new int[subTaskNum];
checkIsDeviceExistAndGetDeviceEndTime();
- for (int i = 0; i < seqTsFileResources.size(); i++) {
- hasTargetFileStartChunkGroup[i] = false;
+ for (int i = 0; i < fileWriterList.size(); i++) {
+ chunkGroupHeaderSize = fileWriterList.get(i).startChunkGroup(deviceId);
}
}
@Override
public void endChunkGroup() throws IOException {
for (int i = 0; i < seqTsFileResources.size(); i++) {
- if (hasTargetFileStartChunkGroup[i]) {
- fileWriterList.get(i).endChunkGroup();
+ TsFileIOWriter targetFileWriter = fileWriterList.get(i);
+ if (isDeviceExistedInTargetFiles[i]) {
+ targetFileWriter.endChunkGroup();
+ } else {
+ targetFileWriter.truncate(targetFileWriter.getPos() - chunkGroupHeaderSize);
}
+ isDeviceExistedInTargetFiles[i] = false;
}
deviceId = null;
}
@Override
- public void endMeasurement() throws IOException {
- writeRateLimit(chunkWriter.estimateMaxSeriesMemSize());
- chunkWriter.writeToFileWriter(fileWriterList.get(seqFileIndex));
- chunkWriter = null;
- seqFileIndex = 0;
+ public void endMeasurement(int subTaskId) throws IOException {
+ flushChunkToFileWriter(fileWriterList.get(seqFileIndexArray[subTaskId]), subTaskId);
+ seqFileIndexArray[subTaskId] = 0;
}
@Override
- public void write(long timestamp, Object value) throws IOException {
- checkTimeAndMayFlushChunkToCurrentFile(timestamp);
- checkAndMayStartChunkGroup();
- writeDataPoint(timestamp, value);
- updateDeviceStartAndEndTime(targetTsFileResources.get(seqFileIndex), timestamp);
- checkChunkSizeAndMayOpenANewChunk(fileWriterList.get(seqFileIndex));
- isEmptyFile[seqFileIndex] = false;
+ public void write(long timestamp, Object value, int subTaskId) throws IOException {
+ checkTimeAndMayFlushChunkToCurrentFile(timestamp, subTaskId);
+ writeDataPoint(timestamp, value, subTaskId);
+ checkChunkSizeAndMayOpenANewChunk(fileWriterList.get(seqFileIndexArray[subTaskId]), subTaskId);
+ isDeviceExistedInTargetFiles[seqFileIndexArray[subTaskId]] = true;
+ isEmptyFile[seqFileIndexArray[subTaskId]] = false;
}
@Override
@@ -123,16 +127,21 @@ public void close() throws IOException {
}
fileWriterList = null;
seqTsFileResources = null;
- chunkWriter = null;
}
- private void checkTimeAndMayFlushChunkToCurrentFile(long timestamp) throws IOException {
+ @Override
+ public List getFileIOWriter() {
+ return fileWriterList;
+ }
+
+ private void checkTimeAndMayFlushChunkToCurrentFile(long timestamp, int subTaskId)
+ throws IOException {
+ int fileIndex = seqFileIndexArray[subTaskId];
// if timestamp is later than the current source seq tsfile, than flush chunk writer
- while (timestamp > currentDeviceEndTime[seqFileIndex]) {
- if (seqFileIndex != seqTsFileResources.size() - 1) {
- writeRateLimit(chunkWriter.estimateMaxSeriesMemSize());
- chunkWriter.writeToFileWriter(fileWriterList.get(seqFileIndex));
- seqFileIndex++;
+ while (timestamp > currentDeviceEndTime[fileIndex]) {
+ if (fileIndex != seqTsFileResources.size() - 1) {
+ flushChunkToFileWriter(fileWriterList.get(fileIndex), subTaskId);
+ seqFileIndexArray[subTaskId] = ++fileIndex;
} else {
// If the seq file is deleted for various reasons, the following two situations may occur
// when selecting the source files: (1) unseq files may have some devices or measurements
@@ -168,11 +177,4 @@ private void checkIsDeviceExistAndGetDeviceEndTime() throws IOException {
fileIndex++;
}
}
-
- private void checkAndMayStartChunkGroup() throws IOException {
- if (!hasTargetFileStartChunkGroup[seqFileIndex]) {
- fileWriterList.get(seqFileIndex).startChunkGroup(deviceId);
- hasTargetFileStartChunkGroup[seqFileIndex] = true;
- }
- }
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/writer/InnerSpaceCompactionWriter.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/writer/InnerSpaceCompactionWriter.java
index 7b0e31095dfd3..af2cc53c67ed4 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/writer/InnerSpaceCompactionWriter.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/writer/InnerSpaceCompactionWriter.java
@@ -19,22 +19,20 @@
package org.apache.iotdb.db.engine.compaction.writer;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
-import org.apache.iotdb.tsfile.write.writer.RestorableTsFileIOWriter;
import org.apache.iotdb.tsfile.write.writer.TsFileIOWriter;
import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
public class InnerSpaceCompactionWriter extends AbstractCompactionWriter {
private TsFileIOWriter fileWriter;
private boolean isEmptyFile;
- private final TsFileResource targetTsFileResource;
-
public InnerSpaceCompactionWriter(TsFileResource targetFileResource) throws IOException {
- fileWriter = new RestorableTsFileIOWriter(targetFileResource.getTsFile());
+ this.fileWriter = new TsFileIOWriter(targetFileResource.getTsFile());
isEmptyFile = true;
- this.targetTsFileResource = targetFileResource;
}
@Override
@@ -50,17 +48,14 @@ public void endChunkGroup() throws IOException {
}
@Override
- public void endMeasurement() throws IOException {
- writeRateLimit(chunkWriter.estimateMaxSeriesMemSize());
- chunkWriter.writeToFileWriter(fileWriter);
- chunkWriter = null;
+ public void endMeasurement(int subTaskId) throws IOException {
+ flushChunkToFileWriter(fileWriter, subTaskId);
}
@Override
- public void write(long timestamp, Object value) throws IOException {
- writeDataPoint(timestamp, value);
- updateDeviceStartAndEndTime(targetTsFileResource, timestamp);
- checkChunkSizeAndMayOpenANewChunk(fileWriter);
+ public void write(long timestamp, Object value, int subTaskId) throws IOException {
+ writeDataPoint(timestamp, value, subTaskId);
+ checkChunkSizeAndMayOpenANewChunk(fileWriter, subTaskId);
isEmptyFile = false;
}
@@ -80,7 +75,11 @@ public void close() throws IOException {
if (fileWriter != null && fileWriter.canWrite()) {
fileWriter.close();
}
- chunkWriter = null;
fileWriter = null;
}
+
+ @Override
+ public List getFileIOWriter() {
+ return Collections.singletonList(fileWriter);
+ }
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/flush/FlushManager.java b/server/src/main/java/org/apache/iotdb/db/engine/flush/FlushManager.java
index 01f4308a7fdf6..8da0a37a1892f 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/flush/FlushManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/flush/FlushManager.java
@@ -29,9 +29,9 @@
import org.apache.iotdb.db.service.IService;
import org.apache.iotdb.db.service.JMXService;
import org.apache.iotdb.db.service.ServiceType;
-import org.apache.iotdb.db.service.metrics.Metric;
import org.apache.iotdb.db.service.metrics.MetricsService;
-import org.apache.iotdb.db.service.metrics.Tag;
+import org.apache.iotdb.db.service.metrics.enums.Metric;
+import org.apache.iotdb.db.service.metrics.enums.Tag;
import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
import org.apache.iotdb.metrics.utils.MetricLevel;
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/flush/MemTableFlushTask.java b/server/src/main/java/org/apache/iotdb/db/engine/flush/MemTableFlushTask.java
index 387c609e3d977..5c7fa5e62d32d 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/flush/MemTableFlushTask.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/flush/MemTableFlushTask.java
@@ -27,9 +27,9 @@
import org.apache.iotdb.db.exception.runtime.FlushRunTimeException;
import org.apache.iotdb.db.metadata.idtable.entry.IDeviceID;
import org.apache.iotdb.db.rescon.SystemInfo;
-import org.apache.iotdb.db.service.metrics.Metric;
import org.apache.iotdb.db.service.metrics.MetricsService;
-import org.apache.iotdb.db.service.metrics.Tag;
+import org.apache.iotdb.db.service.metrics.enums.Metric;
+import org.apache.iotdb.db.service.metrics.enums.Tag;
import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
import org.apache.iotdb.metrics.utils.MetricLevel;
import org.apache.iotdb.tsfile.write.chunk.IChunkWriter;
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/memtable/AbstractMemTable.java b/server/src/main/java/org/apache/iotdb/db/engine/memtable/AbstractMemTable.java
index 06706b1361f5e..7cde23546ba5e 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/memtable/AbstractMemTable.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/memtable/AbstractMemTable.java
@@ -28,9 +28,9 @@
import org.apache.iotdb.db.metadata.path.PartialPath;
import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan;
import org.apache.iotdb.db.qp.physical.crud.InsertTabletPlan;
-import org.apache.iotdb.db.service.metrics.Metric;
import org.apache.iotdb.db.service.metrics.MetricsService;
-import org.apache.iotdb.db.service.metrics.Tag;
+import org.apache.iotdb.db.service.metrics.enums.Metric;
+import org.apache.iotdb.db.service.metrics.enums.Tag;
import org.apache.iotdb.db.utils.MemUtils;
import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
import org.apache.iotdb.metrics.utils.MetricLevel;
@@ -152,10 +152,17 @@ public void insert(InsertRowPlan insertRowPlan) {
List schemaList = new ArrayList<>();
List dataTypes = new ArrayList<>();
+ int nullPointsNumber = 0;
for (int i = 0; i < insertRowPlan.getMeasurements().length; i++) {
+ // use measurements[i] to ignore failed partial insert
if (measurements[i] == null) {
continue;
}
+ // use values[i] to ignore null value
+ if (values[i] == null) {
+ nullPointsNumber++;
+ continue;
+ }
IMeasurementSchema schema = insertRowPlan.getMeasurementMNodes()[i].getSchema();
schemaList.add(schema);
dataTypes.add(schema.getType());
@@ -164,7 +171,9 @@ public void insert(InsertRowPlan insertRowPlan) {
write(insertRowPlan.getDeviceID(), schemaList, insertRowPlan.getTime(), values);
int pointsInserted =
- insertRowPlan.getMeasurements().length - insertRowPlan.getFailedMeasurementNumber();
+ insertRowPlan.getMeasurements().length
+ - insertRowPlan.getFailedMeasurementNumber()
+ - nullPointsNumber;
totalPointsNum += pointsInserted;
@@ -190,9 +199,12 @@ public void insertAlignedRow(InsertRowPlan insertRowPlan) {
updatePlanIndexes(insertRowPlan.getIndex());
String[] measurements = insertRowPlan.getMeasurements();
+ Object[] values = insertRowPlan.getValues();
+
List schemaList = new ArrayList<>();
List dataTypes = new ArrayList<>();
for (int i = 0; i < insertRowPlan.getMeasurements().length; i++) {
+ // use measurements[i] to ignore failed partial insert
if (measurements[i] == null) {
continue;
}
@@ -203,13 +215,8 @@ public void insertAlignedRow(InsertRowPlan insertRowPlan) {
if (schemaList.isEmpty()) {
return;
}
- memSize +=
- MemUtils.getAlignedRecordsSize(dataTypes, insertRowPlan.getValues(), disableMemControl);
- writeAlignedRow(
- insertRowPlan.getDeviceID(),
- schemaList,
- insertRowPlan.getTime(),
- insertRowPlan.getValues());
+ memSize += MemUtils.getAlignedRecordsSize(dataTypes, values, disableMemControl);
+ writeAlignedRow(insertRowPlan.getDeviceID(), schemaList, insertRowPlan.getTime(), values);
int pointsInserted =
insertRowPlan.getMeasurements().length - insertRowPlan.getFailedMeasurementNumber();
totalPointsNum += pointsInserted;
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/memtable/AlignedWritableMemChunk.java b/server/src/main/java/org/apache/iotdb/db/engine/memtable/AlignedWritableMemChunk.java
index cef4c5d9deb69..c7ec62f1f1ae1 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/memtable/AlignedWritableMemChunk.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/memtable/AlignedWritableMemChunk.java
@@ -206,7 +206,7 @@ public IMeasurementSchema getSchema() {
}
@Override
- public TVList getSortedTvListForQuery() {
+ public synchronized TVList getSortedTvListForQuery() {
sortTVList();
// increase reference count
list.increaseReferenceCount();
@@ -214,7 +214,7 @@ public TVList getSortedTvListForQuery() {
}
@Override
- public TVList getSortedTvListForQuery(List schemaList) {
+ public synchronized TVList getSortedTvListForQuery(List schemaList) {
sortTVList();
// increase reference count
list.increaseReferenceCount();
@@ -238,7 +238,7 @@ private void sortTVList() {
}
@Override
- public void sortTvListForFlush() {
+ public synchronized void sortTvListForFlush() {
sortTVList();
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/memtable/IWritableMemChunk.java b/server/src/main/java/org/apache/iotdb/db/engine/memtable/IWritableMemChunk.java
index 3061c30be73b1..c46714fbbaf17 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/memtable/IWritableMemChunk.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/memtable/IWritableMemChunk.java
@@ -94,6 +94,8 @@ void writeAlignedValues(
*
*
the mechanism is just like copy on write
*
+ *
This interface should be synchronized for concurrent with sortTvListForFlush
+ *
* @return sorted tv list
*/
TVList getSortedTvListForQuery();
@@ -103,6 +105,8 @@ void writeAlignedValues(
*
*
the mechanism is just like copy on write
*
+ *
This interface should be synchronized for concurrent with sortTvListForFlush
+ *
* @return sorted tv list
*/
TVList getSortedTvListForQuery(List schemaList);
@@ -110,6 +114,8 @@ void writeAlignedValues(
/**
* served for flush requests. The logic is just same as getSortedTVListForQuery, but without add
* reference count
+ *
+ *
This interface should be synchronized for concurrent with getSortedTvListForQuery
*/
void sortTvListForFlush();
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/querycontext/AlignedReadOnlyMemChunk.java b/server/src/main/java/org/apache/iotdb/db/engine/querycontext/AlignedReadOnlyMemChunk.java
index f0d7cb1be56d2..5e44026dbb6f6 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/querycontext/AlignedReadOnlyMemChunk.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/querycontext/AlignedReadOnlyMemChunk.java
@@ -32,6 +32,7 @@
import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
import org.apache.iotdb.tsfile.read.common.TimeRange;
import org.apache.iotdb.tsfile.read.reader.IPointReader;
+import org.apache.iotdb.tsfile.utils.Pair;
import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema;
import org.apache.iotdb.tsfile.write.schema.VectorMeasurementSchema;
@@ -95,24 +96,57 @@ private void initAlignedChunkMeta(VectorMeasurementSchema schema)
new ChunkMetadata(measurementUid, TSDataType.VECTOR, 0, timeStatistics);
List valueChunkMetadataList = new ArrayList<>();
// update time chunk
+ boolean[] timeDuplicateInfo = null;
for (int row = 0; row < alignedChunkData.rowCount(); row++) {
- timeStatistics.update(alignedChunkData.getTime(row));
+ if (row == alignedChunkData.rowCount() - 1
+ || alignedChunkData.getTime(row) != alignedChunkData.getTime(row + 1)) {
+ timeStatistics.update(alignedChunkData.getTime(row));
+ } else {
+ if (timeDuplicateInfo == null) {
+ timeDuplicateInfo = new boolean[alignedChunkData.rowCount()];
+ }
+ timeDuplicateInfo[row] = true;
+ }
}
timeStatistics.setEmpty(false);
// update value chunk
for (int column = 0; column < measurementList.size(); column++) {
+ // Pair of Time and Index
+ Pair lastValidPointIndexForTimeDupCheck = null;
+ if (timeDuplicateInfo != null) {
+ lastValidPointIndexForTimeDupCheck = new Pair<>(Long.MIN_VALUE, null);
+ }
Statistics valueStatistics = Statistics.getStatsByType(dataTypeList.get(column));
- IChunkMetadata valueChunkMetadata =
- new ChunkMetadata(
- measurementList.get(column), dataTypeList.get(column), 0, valueStatistics);
- valueChunkMetadataList.add(valueChunkMetadata);
if (alignedChunkData.getValues().get(column) == null) {
- valueStatistics.setEmpty(true);
+ valueChunkMetadataList.add(null);
continue;
}
for (int row = 0; row < alignedChunkData.rowCount(); row++) {
long time = alignedChunkData.getTime(row);
- int originRowIndex = alignedChunkData.getValueIndex(row);
+ // skip time duplicated rows
+ if (timeDuplicateInfo != null) {
+ if (!alignedChunkData.isValueMarked(alignedChunkData.getValueIndex(row), column)) {
+ lastValidPointIndexForTimeDupCheck.left = time;
+ lastValidPointIndexForTimeDupCheck.right = alignedChunkData.getValueIndex(row);
+ }
+ if (timeDuplicateInfo[row]) {
+ continue;
+ }
+ }
+ // This part of the code solves the following problem:
+ // Time: 1,2,2,3
+ // Value: 1,2,null,null
+ // When rowIndex:1, pair(min,null), timeDuplicateInfo:false, write(T:1,V:1)
+ // When rowIndex:2, pair(2,2), timeDuplicateInfo:true, skip writing value
+ // When rowIndex:3, pair(2,2), timeDuplicateInfo:false, T:2==pair.left:2, write(T:2,V:2)
+ // When rowIndex:4, pair(2,2), timeDuplicateInfo:false, T:3!=pair.left:2, write(T:3,V:null)
+ int originRowIndex;
+ if (lastValidPointIndexForTimeDupCheck != null
+ && (alignedChunkData.getTime(row) == lastValidPointIndexForTimeDupCheck.left)) {
+ originRowIndex = lastValidPointIndexForTimeDupCheck.right;
+ } else {
+ originRowIndex = alignedChunkData.getValueIndex(row);
+ }
boolean isNull = alignedChunkData.isValueMarked(originRowIndex, column);
if (isNull) {
continue;
@@ -146,6 +180,15 @@ private void initAlignedChunkMeta(VectorMeasurementSchema schema)
throw new QueryProcessException("Unsupported data type:" + dataType);
}
}
+ if (valueStatistics.getCount() > 0) {
+ IChunkMetadata valueChunkMetadata =
+ new ChunkMetadata(
+ measurementList.get(column), dataTypeList.get(column), 0, valueStatistics);
+ valueChunkMetadataList.add(valueChunkMetadata);
+ valueStatistics.setEmpty(false);
+ } else {
+ valueChunkMetadataList.add(null);
+ }
valueStatistics.setEmpty(false);
}
IChunkMetadata vectorChunkMetadata =
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessor.java b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessor.java
index 71eaef12efe0a..4a9f9043a058e 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessor.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessor.java
@@ -1000,6 +1000,22 @@ private void releaseFlushedMemTable(IMemTable memTable) {
}
}
+ /** This method will synchronize on the memTable and release its flushing resources */
+ private void syncReleaseFlushedMemTable(IMemTable memTable) {
+ synchronized (memTable) {
+ releaseFlushedMemTable(memTable);
+ memTable.notifyAll();
+ if (logger.isDebugEnabled()) {
+ logger.debug(
+ "{}: {} released a memtable (signal={}), flushingMemtables size ={}",
+ storageGroupName,
+ tsFileResource.getTsFile().getName(),
+ memTable.isSignalMemTable(),
+ flushingMemTables.size());
+ }
+ }
+ }
+
/**
* Take the first MemTable from the flushingMemTables and flush it. Called by a flush thread of
* the flush manager pool
@@ -1015,7 +1031,7 @@ public void flushOneMemTable() {
MemTableFlushTask flushTask =
new MemTableFlushTask(memTableToFlush, writer, storageGroupName);
flushTask.syncFlushMemTable();
- } catch (Exception e) {
+ } catch (Throwable e) {
if (writer == null) {
logger.info(
"{}: {} is closed during flush, abandon flush task",
@@ -1045,7 +1061,32 @@ public void flushOneMemTable() {
tsFileResource.getTsFile().getName(),
e1);
}
- Thread.currentThread().interrupt();
+ // release resource
+ try {
+ syncReleaseFlushedMemTable(memTableToFlush);
+ // close wal node
+ MultiFileLogNodeManager.getInstance()
+ .closeNode(storageGroupName + "-" + tsFileResource.getTsFile().getName());
+ // make sure no query will search this file
+ tsFileResource.setTimeIndex(config.getTimeIndexLevel().getTimeIndex());
+ // this callback method will register this empty tsfile into TsFileManager
+ for (CloseFileListener closeFileListener : closeFileListeners) {
+ closeFileListener.onClosed(this);
+ }
+ // close writer
+ writer.close();
+ writer = null;
+ synchronized (flushingMemTables) {
+ flushingMemTables.notifyAll();
+ }
+ } catch (Exception e1) {
+ logger.error(
+ "{}: {} Release resource meets error",
+ storageGroupName,
+ tsFileResource.getTsFile().getName(),
+ e1);
+ }
+ return;
}
}
}
@@ -1087,19 +1128,9 @@ public void flushOneMemTable() {
tsFileResource.getTsFile().getName(),
memTableToFlush.isSignalMemTable());
}
+
// for sync flush
- synchronized (memTableToFlush) {
- releaseFlushedMemTable(memTableToFlush);
- memTableToFlush.notifyAll();
- if (logger.isDebugEnabled()) {
- logger.debug(
- "{}: {} released a memtable (signal={}), flushingMemtables size ={}",
- storageGroupName,
- tsFileResource.getTsFile().getName(),
- memTableToFlush.isSignalMemTable(),
- flushingMemTables.size());
- }
- }
+ syncReleaseFlushedMemTable(memTableToFlush);
if (shouldClose && flushingMemTables.isEmpty() && writer != null) {
try {
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessorInfo.java b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessorInfo.java
index b4c24fcab5eb9..273446eaea3eb 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessorInfo.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessorInfo.java
@@ -18,9 +18,9 @@
*/
package org.apache.iotdb.db.engine.storagegroup;
-import org.apache.iotdb.db.service.metrics.Metric;
import org.apache.iotdb.db.service.metrics.MetricsService;
-import org.apache.iotdb.db.service.metrics.Tag;
+import org.apache.iotdb.db.service.metrics.enums.Metric;
+import org.apache.iotdb.db.service.metrics.enums.Tag;
import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
import org.apache.iotdb.metrics.utils.MetricLevel;
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileResource.java b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileResource.java
index e0672337fe9f9..14f21df9501ce 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileResource.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileResource.java
@@ -95,14 +95,11 @@ public class TsFileResource {
/** time index type, V012FileTimeIndex = 0, deviceTimeIndex = 1, fileTimeIndex = 2 */
private byte timeIndexType;
- private ModificationFile modFile;
+ private volatile ModificationFile modFile;
- private ModificationFile compactionModFile;
+ private volatile ModificationFile compactionModFile;
- protected volatile boolean closed = false;
- private volatile boolean deleted = false;
- volatile boolean isCompacting = false;
- volatile boolean compactionCandidate = false;
+ protected volatile TsFileResourceStatus status = TsFileResourceStatus.UNCLOSED;
private TsFileLock tsFileLock = new TsFileLock();
@@ -133,7 +130,7 @@ public class TsFileResource {
private long ramSize;
- private long tsFileSize = -1L;
+ private volatile long tsFileSize = -1L;
private TsFileProcessor processor;
@@ -164,9 +161,7 @@ public TsFileResource(TsFileResource other) throws IOException {
this.timeIndex = other.timeIndex;
this.timeIndexType = other.timeIndexType;
this.modFile = other.modFile;
- this.closed = other.closed;
- this.deleted = other.deleted;
- this.isCompacting = other.isCompacting;
+ this.status = other.status;
this.pathToChunkMetadataListMap = other.pathToChunkMetadataListMap;
this.pathToReadOnlyMemChunkMap = other.pathToReadOnlyMemChunkMap;
this.pathToTimeSeriesMetadataMap = other.pathToTimeSeriesMetadataMap;
@@ -383,7 +378,7 @@ public String getTsFilePath() {
}
public long getTsFileSize() {
- if (closed) {
+ if (isClosed()) {
if (tsFileSize == -1) {
synchronized (this) {
if (tsFileSize == -1) {
@@ -432,11 +427,11 @@ public boolean mayContainsDevice(String device) {
}
public boolean isClosed() {
- return closed;
+ return this.status != TsFileResourceStatus.UNCLOSED;
}
public void close() throws IOException {
- closed = true;
+ this.setStatus(TsFileResourceStatus.CLOSED);
if (modFile != null) {
modFile.close();
modFile = null;
@@ -555,9 +550,7 @@ void moveTo(File targetDir) {
@Override
public String toString() {
- return String.format(
- "file is %s, compactionCandidate: %s, compacting: %s",
- file.toString(), compactionCandidate, isCompacting);
+ return String.format("file is %s, status: %s", file.toString(), status);
}
@Override
@@ -577,32 +570,58 @@ public int hashCode() {
return Objects.hash(file);
}
- public void setClosed(boolean closed) {
- this.closed = closed;
- }
-
public boolean isDeleted() {
- return deleted;
- }
-
- public void setDeleted(boolean deleted) {
- this.deleted = deleted;
+ return this.status == TsFileResourceStatus.DELETED;
}
public boolean isCompacting() {
- return isCompacting;
- }
-
- public void setCompacting(boolean compacting) {
- isCompacting = compacting;
+ return this.status == TsFileResourceStatus.COMPACTING;
}
public boolean isCompactionCandidate() {
- return compactionCandidate;
+ return this.status == TsFileResourceStatus.COMPACTION_CANDIDATE;
}
- public void setCompactionCandidate(boolean compactionCandidate) {
- this.compactionCandidate = compactionCandidate;
+ public void setStatus(TsFileResourceStatus status) {
+ switch (status) {
+ case CLOSED:
+ if (this.status != TsFileResourceStatus.DELETED) {
+ this.status = TsFileResourceStatus.CLOSED;
+ }
+ break;
+ case UNCLOSED:
+ // Print a stack trace in a warn statement.
+ this.status = TsFileResourceStatus.UNCLOSED;
+ break;
+ case DELETED:
+ if (this.status != TsFileResourceStatus.UNCLOSED) {
+ this.status = TsFileResourceStatus.DELETED;
+ } else {
+ throw new RuntimeException(
+ "Cannot set the status of an unclosed TsFileResource to DELETED");
+ }
+ break;
+ case COMPACTING:
+ if (this.status == TsFileResourceStatus.COMPACTION_CANDIDATE) {
+ this.status = TsFileResourceStatus.COMPACTING;
+ } else {
+ throw new RuntimeException(
+ "Cannot set the status of TsFileResource to COMPACTING while its status is "
+ + this.status);
+ }
+ break;
+ case COMPACTION_CANDIDATE:
+ if (this.status == TsFileResourceStatus.CLOSED) {
+ this.status = TsFileResourceStatus.COMPACTION_CANDIDATE;
+ } else {
+ throw new RuntimeException(
+ "Cannot set the status of TsFileResource to COMPACTION_CANDIDATE while its status is "
+ + this.status);
+ }
+ break;
+ default:
+ break;
+ }
}
/**
@@ -633,7 +652,7 @@ public boolean isSatisfied(
}
long startTime = getStartTime(deviceId);
- long endTime = closed || !isSeq ? getEndTime(deviceId) : Long.MAX_VALUE;
+ long endTime = isClosed() || !isSeq ? getEndTime(deviceId) : Long.MAX_VALUE;
if (!isAlive(endTime, ttl)) {
if (debug) {
@@ -656,7 +675,7 @@ public boolean isSatisfied(
/** @return true if the TsFile lives beyond TTL */
private boolean isSatisfied(Filter timeFilter, boolean isSeq, long ttl, boolean debug) {
long startTime = getFileStartTime();
- long endTime = closed || !isSeq ? getFileEndTime() : Long.MAX_VALUE;
+ long endTime = isClosed() || !isSeq ? getFileEndTime() : Long.MAX_VALUE;
if (!isAlive(endTime, ttl)) {
if (debug) {
@@ -695,7 +714,7 @@ public boolean isSatisfied(
}
long startTime = getStartTime(deviceId);
- long endTime = closed || !isSeq ? getEndTime(deviceId) : Long.MAX_VALUE;
+ long endTime = isClosed() || !isSeq ? getEndTime(deviceId) : Long.MAX_VALUE;
if (timeFilter != null) {
boolean res = timeFilter.satisfyStartEndTime(startTime, endTime);
@@ -853,7 +872,7 @@ public void updatePlanIndexes(long planIndex) {
}
maxPlanIndex = Math.max(maxPlanIndex, planIndex);
minPlanIndex = Math.min(minPlanIndex, planIndex);
- if (closed) {
+ if (isClosed()) {
try {
serialize();
} catch (IOException e) {
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileResourceStatus.java b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileResourceStatus.java
new file mode 100644
index 0000000000000..42eaf481d3d64
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileResourceStatus.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.engine.storagegroup;
+
+public enum TsFileResourceStatus {
+ UNCLOSED,
+ CLOSED,
+ COMPACTION_CANDIDATE,
+ COMPACTING,
+ DELETED
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/VirtualStorageGroupProcessor.java b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/VirtualStorageGroupProcessor.java
index c2e4c268ec8b5..a355670824926 100755
--- a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/VirtualStorageGroupProcessor.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/VirtualStorageGroupProcessor.java
@@ -25,12 +25,9 @@
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.conf.directories.DirectoryManager;
import org.apache.iotdb.db.engine.StorageEngine;
-import org.apache.iotdb.db.engine.cache.ChunkCache;
-import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
import org.apache.iotdb.db.engine.compaction.CompactionScheduler;
import org.apache.iotdb.db.engine.compaction.CompactionTaskManager;
-import org.apache.iotdb.db.engine.compaction.inner.utils.InnerSpaceCompactionUtils;
-import org.apache.iotdb.db.engine.compaction.task.CompactionRecoverTask;
+import org.apache.iotdb.db.engine.compaction.task.CompactionRecoverManager;
import org.apache.iotdb.db.engine.fileSystem.SystemFileFactory;
import org.apache.iotdb.db.engine.flush.CloseFileListener;
import org.apache.iotdb.db.engine.flush.FlushListener;
@@ -70,9 +67,9 @@
import org.apache.iotdb.db.rescon.TsFileResourceManager;
import org.apache.iotdb.db.service.IoTDB;
import org.apache.iotdb.db.service.SettleService;
-import org.apache.iotdb.db.service.metrics.Metric;
import org.apache.iotdb.db.service.metrics.MetricsService;
-import org.apache.iotdb.db.service.metrics.Tag;
+import org.apache.iotdb.db.service.metrics.enums.Metric;
+import org.apache.iotdb.db.service.metrics.enums.Tag;
import org.apache.iotdb.db.tools.settle.TsFileAndModSettleTool;
import org.apache.iotdb.db.utils.CopyOnReadLinkedList;
import org.apache.iotdb.db.utils.MmapUtil;
@@ -119,11 +116,10 @@
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.regex.Pattern;
import static org.apache.iotdb.db.conf.IoTDBConstant.FILE_NAME_SEPARATOR;
-import static org.apache.iotdb.db.engine.compaction.utils.log.CompactionLogger.INNER_COMPACTION_LOG_NAME_SUFFIX_FROM_OLD;
import static org.apache.iotdb.db.engine.storagegroup.TsFileResource.TEMP_SUFFIX;
+import static org.apache.iotdb.db.qp.executor.PlanExecutor.operateClearCache;
import static org.apache.iotdb.tsfile.common.constant.TsFileConstant.TSFILE_SUFFIX;
/**
@@ -496,9 +492,7 @@ public void incrementRecoveredFilesNum() {
/** recover from file */
private void recover() throws StorageGroupProcessorException {
try {
- recoverInnerSpaceCompaction(true);
- recoverInnerSpaceCompaction(false);
- recoverCrossSpaceCompaction();
+ recoverCompaction();
} catch (Exception e) {
throw new StorageGroupProcessorException(e);
}
@@ -592,83 +586,12 @@ private void initCompaction() {
TimeUnit.MILLISECONDS);
}
- /** recover crossSpaceCompaction */
- private void recoverCrossSpaceCompaction() throws Exception {
- CompactionRecoverTask compactionRecoverTask =
- new CompactionRecoverTask(tsFileManager, logicalStorageGroupName, virtualStorageGroupId);
- compactionRecoverTask.recoverCrossSpaceCompaction();
- }
-
- private void recoverInnerSpaceCompaction(boolean isSequence) throws Exception {
- // search compaction log for SizeTieredCompaction
- List dirs;
- if (isSequence) {
- dirs = DirectoryManager.getInstance().getAllSequenceFileFolders();
- } else {
- dirs = DirectoryManager.getInstance().getAllUnSequenceFileFolders();
- }
- for (String dir : dirs) {
- File storageGroupDir =
- new File(
- dir
- + File.separator
- + logicalStorageGroupName
- + File.separator
- + virtualStorageGroupId);
- if (!storageGroupDir.exists()) {
- return;
- }
- File[] timePartitionDirs = storageGroupDir.listFiles();
- if (timePartitionDirs == null) {
- return;
- }
- for (File timePartitionDir : timePartitionDirs) {
- if (!timePartitionDir.isDirectory()
- || !Pattern.compile("[0-9]*").matcher(timePartitionDir.getName()).matches()) {
- continue;
- }
- File[] compactionLogs =
- InnerSpaceCompactionUtils.findInnerSpaceCompactionLogs(timePartitionDir.getPath());
- for (File compactionLog : compactionLogs) {
- IoTDBDescriptor.getInstance()
- .getConfig()
- .getInnerCompactionStrategy()
- .getCompactionRecoverTask(
- tsFileManager.getStorageGroupName(),
- tsFileManager.getVirtualStorageGroup(),
- Long.parseLong(
- timePartitionDir
- .getPath()
- .substring(timePartitionDir.getPath().lastIndexOf(File.separator) + 1)),
- compactionLog,
- timePartitionDir.getPath(),
- isSequence,
- tsFileManager)
- .call();
- }
- }
- }
-
- // search compaction log for old LevelCompaction
- File logFile =
- FSFactoryProducer.getFSFactory()
- .getFile(
- storageGroupSysDir.getAbsolutePath(),
- logicalStorageGroupName + INNER_COMPACTION_LOG_NAME_SUFFIX_FROM_OLD);
- if (logFile.exists()) {
- IoTDBDescriptor.getInstance()
- .getConfig()
- .getInnerCompactionStrategy()
- .getCompactionRecoverTask(
- tsFileManager.getStorageGroupName(),
- tsFileManager.getVirtualStorageGroup(),
- -1,
- logFile,
- logFile.getParent(),
- isSequence,
- tsFileManager)
- .call();
- }
+ private void recoverCompaction() throws Exception {
+ CompactionRecoverManager compactionRecoverManager =
+ new CompactionRecoverManager(tsFileManager, logicalStorageGroupName, virtualStorageGroupId);
+ compactionRecoverManager.recoverInnerSpaceCompaction(true);
+ compactionRecoverManager.recoverInnerSpaceCompaction(false);
+ compactionRecoverManager.recoverCrossSpaceCompaction();
}
private void updatePartitionFileVersion(long partitionNum, long fileVersion) {
@@ -777,7 +700,8 @@ private Pair, List> getAllFiles(List upgradeRet = new ArrayList<>();
for (File f : upgradeFiles) {
TsFileResource fileResource = new TsFileResource(f);
- fileResource.setClosed(true);
+ fileResource.setStatus(TsFileResourceStatus.CLOSED);
+ ;
// make sure the flush command is called before IoTDB is down.
fileResource.deserializeFromOldFile();
upgradeRet.add(fileResource);
@@ -846,7 +770,7 @@ private void recoverTsFiles(List tsFiles, RecoveryContext contex
if (writer != null && writer.hasCrashed()) {
tsFileManager.addForRecover(tsFileResource, isSeq);
} else {
- tsFileResource.setClosed(true);
+ tsFileResource.setStatus(TsFileResourceStatus.CLOSED);
tsFileManager.add(tsFileResource, isSeq);
tsFileResourceManager.registerSealedTsFileResource(tsFileResource);
}
@@ -1573,7 +1497,7 @@ private void checkFileTTL(TsFileResource resource, long ttlLowerBound, boolean i
}
// prevent new merges and queries from choosing this file
- resource.setDeleted(true);
+ resource.setStatus(TsFileResourceStatus.DELETED);
// ensure that the file is not used by any queries
if (resource.tryWriteLock()) {
@@ -2036,7 +1960,7 @@ private void deleteDataInFiles(
continue;
}
- if (tsFileResource.isCompacting) {
+ if (tsFileResource.isCompacting()) {
// we have to set modification offset to MAX_VALUE, as the offset of source chunk may
// change after compaction
deletion.setFileOffset(Long.MAX_VALUE);
@@ -2244,9 +2168,8 @@ private void settleTsFileCallBack(
.recoverSettleFileMap
.remove(oldTsFileResource.getTsFile().getAbsolutePath());
}
- // clear Cache , including chunk cache and timeseriesMetadata cache
- ChunkCache.getInstance().clear();
- TimeSeriesMetadataCache.getInstance().clear();
+ // clear Cache , including chunk cache, timeseriesMetadata cache and bloom filter cache
+ operateClearCache();
// if old tsfile is being deleted in the process due to its all data's being deleted.
if (!oldTsFileResource.getTsFile().exists()) {
diff --git a/server/src/main/java/org/apache/iotdb/db/metadata/MManager.java b/server/src/main/java/org/apache/iotdb/db/metadata/MManager.java
index 0323d093bb42e..1784e782f7e4d 100644
--- a/server/src/main/java/org/apache/iotdb/db/metadata/MManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/metadata/MManager.java
@@ -82,9 +82,9 @@
import org.apache.iotdb.db.query.dataset.ShowTimeSeriesResult;
import org.apache.iotdb.db.rescon.MemTableManager;
import org.apache.iotdb.db.service.IoTDB;
-import org.apache.iotdb.db.service.metrics.Metric;
import org.apache.iotdb.db.service.metrics.MetricsService;
-import org.apache.iotdb.db.service.metrics.Tag;
+import org.apache.iotdb.db.service.metrics.enums.Metric;
+import org.apache.iotdb.db.service.metrics.enums.Tag;
import org.apache.iotdb.db.utils.SchemaUtils;
import org.apache.iotdb.db.utils.TestOnly;
import org.apache.iotdb.db.utils.TypeInferenceUtils;
@@ -869,6 +869,11 @@ public void deleteStorageGroups(List storageGroups) throws Metadata
removeFromTagInvertedIndex(leafMNode);
}
+ // unmark all storage group from related templates
+ for (Template template : templateManager.getTemplateMap().values()) {
+ template.unmarkStorageGroups(storageGroups);
+ }
+
// drop triggers with no exceptions
TriggerEngine.drop(leafMNodes);
@@ -934,7 +939,7 @@ protected IMNode getDeviceNodeWithAutoCreate(
}
}
node = mtree.getDeviceNodeWithAutoCreating(path, sgLevel);
- if (!(node.isStorageGroup())) {
+ if (!(node.isStorageGroup()) && !isRecovering) {
logWriter.autoCreateDeviceMNode(new AutoCreateDeviceMNodePlan(node.getPartialPath()));
}
return node;
@@ -946,7 +951,7 @@ protected IMNode getDeviceNodeWithAutoCreate(
}
// ignore set storage group concurrently
node = mtree.getDeviceNodeWithAutoCreating(path, sgLevel);
- if (!(node.isStorageGroup())) {
+ if (!(node.isStorageGroup()) && !isRecovering) {
logWriter.autoCreateDeviceMNode(new AutoCreateDeviceMNodePlan(node.getPartialPath()));
}
return node;
@@ -1522,8 +1527,14 @@ protected IMeasurementMNode getMeasurementMNode(IMNode deviceMNode, String measu
* @param path timeseries
* @param offset offset in the tag file
*/
- public void changeOffset(PartialPath path, long offset) throws MetadataException {
- mtree.getMeasurementMNode(path).setOffset(offset);
+ public void changeOffset(PartialPath path, long offset) throws MetadataException, IOException {
+ IMeasurementMNode mNode = mtree.getMeasurementMNode(path);
+ mNode.setOffset(offset);
+ // the timeseries has already been created and now system is recovering, using the tag info in
+ // tagFile to recover index directly
+ if (isRecovering) {
+ tagManager.recoverIndex(offset, mNode);
+ }
}
public void changeAlias(PartialPath path, String alias) throws MetadataException {
diff --git a/server/src/main/java/org/apache/iotdb/db/metadata/lastCache/LastCacheManager.java b/server/src/main/java/org/apache/iotdb/db/metadata/lastCache/LastCacheManager.java
index 6f57d912f0441..117f5cec07e0f 100644
--- a/server/src/main/java/org/apache/iotdb/db/metadata/lastCache/LastCacheManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/metadata/lastCache/LastCacheManager.java
@@ -224,12 +224,12 @@ public static long getLastTimeStamp(IMeasurementMNode node, QueryContext queryCo
// because LastPointReader will do itself sort logic instead of depending on fillOrderIndex.
QueryDataSource dataSource =
QueryResourceManager.getInstance()
- .getQueryDataSource(node.getPartialPath(), queryContext, null, false);
+ .getQueryDataSource(node.getMeasurementPath(), queryContext, null, false);
Set measurementSet = new HashSet<>();
- measurementSet.add(node.getPartialPath().getFullPath());
+ measurementSet.add(node.getName());
LastPointReader lastReader =
new LastPointReader(
- node.getPartialPath(),
+ node.getMeasurementPath(),
node.getSchema().getType(),
measurementSet,
queryContext,
diff --git a/server/src/main/java/org/apache/iotdb/db/metadata/logfile/MLogTxtWriter.java b/server/src/main/java/org/apache/iotdb/db/metadata/logfile/MLogTxtWriter.java
index f4b85f95babae..c9660562dfa8e 100644
--- a/server/src/main/java/org/apache/iotdb/db/metadata/logfile/MLogTxtWriter.java
+++ b/server/src/main/java/org/apache/iotdb/db/metadata/logfile/MLogTxtWriter.java
@@ -58,6 +58,7 @@
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
public class MLogTxtWriter implements AutoCloseable {
@@ -140,12 +141,14 @@ public void createAlignedTimeseries(CreateAlignedTimeSeriesPlan plan) throws IOE
buf.append(
String.format(
"%s,%s,%s,%s,%s,%s",
- MetadataOperationType.CREATE_TIMESERIES,
+ MetadataOperationType.CREATE_ALIGNED_TIMESERIES,
plan.getPrefixPath().getFullPath(),
plan.getMeasurements(),
- plan.getDataTypes().stream().map(TSDataType::serialize),
- plan.getEncodings().stream().map(TSEncoding::serialize),
- plan.getCompressors().stream().map(CompressionType::serialize)));
+ plan.getDataTypes().stream().map(TSDataType::serialize).collect(Collectors.toList()),
+ plan.getEncodings().stream().map(TSEncoding::serialize).collect(Collectors.toList()),
+ plan.getCompressors().stream()
+ .map(CompressionType::serialize)
+ .collect(Collectors.toList())));
buf.append(",[");
if (plan.getAliasList() != null) {
diff --git a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/MTree.java b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/MTree.java
index 0d1b571c19c83..dac5640dee9f6 100644
--- a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/MTree.java
+++ b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/MTree.java
@@ -107,7 +107,6 @@
import java.util.stream.Stream;
import static java.util.stream.Collectors.toList;
-import static org.apache.iotdb.db.conf.IoTDBConstant.MULTI_LEVEL_PATH_WILDCARD;
import static org.apache.iotdb.db.conf.IoTDBConstant.ONE_LEVEL_PATH_WILDCARD;
import static org.apache.iotdb.db.metadata.lastCache.LastCacheManager.getLastTimeStamp;
@@ -738,8 +737,9 @@ public boolean isPathExist(PartialPath path) {
return false;
}
cur = upperTemplate.getDirectNode(nodeNames[i]);
+ } else {
+ cur = cur.getChild(nodeNames[i]);
}
- cur = cur.getChild(nodeNames[i]);
if (cur.isMeasurement()) {
return i == nodeNames.length - 1;
}
@@ -1767,8 +1767,7 @@ public List getPathsSetOnTemplate(Template template) throws MetadataExce
List resSet = new ArrayList<>();
for (PartialPath sgPath : initPath) {
CollectorTraverser> setTemplatePaths =
- new CollectorTraverser>(
- this.root, sgPath.concatNode(MULTI_LEVEL_PATH_WILDCARD)) {
+ new CollectorTraverser>(this.root, sgPath) {
@Override
protected boolean processInternalMatchedMNode(IMNode node, int idx, int level)
throws MetadataException {
@@ -1784,6 +1783,10 @@ protected boolean processFullMatchedMNode(IMNode node, int idx, int level)
return true;
}
+ if (node.isMeasurement()) {
+ return true;
+ }
+
// if node not set template, go on traversing
if (node.getSchemaTemplate() != null) {
// if set template, and equals to target or target for all, add to result
@@ -1797,6 +1800,7 @@ protected boolean processFullMatchedMNode(IMNode node, int idx, int level)
return false;
}
};
+ setTemplatePaths.setPrefixMatch(true);
setTemplatePaths.traverse();
}
return resSet;
@@ -1812,8 +1816,7 @@ public List getPathsUsingTemplate(Template template) throws MetadataExce
for (PartialPath sgPath : initPath) {
CollectorTraverser> usingTemplatePaths =
- new CollectorTraverser>(
- this.root, sgPath.concatNode(MULTI_LEVEL_PATH_WILDCARD)) {
+ new CollectorTraverser>(this.root, sgPath) {
@Override
protected boolean processInternalMatchedMNode(IMNode node, int idx, int level)
throws MetadataException {
@@ -1844,7 +1847,7 @@ protected boolean processFullMatchedMNode(IMNode node, int idx, int level)
return false;
}
};
-
+ usingTemplatePaths.setPrefixMatch(true);
usingTemplatePaths.traverse();
}
return result;
diff --git a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/traverser/Traverser.java b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/traverser/Traverser.java
index 12590ab650749..a13a2f325b17f 100644
--- a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/traverser/Traverser.java
+++ b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/traverser/Traverser.java
@@ -273,7 +273,8 @@ public void setPrefixMatch(boolean isPrefixMatch) {
*/
protected PartialPath getCurrentPartialPath(IMNode currentNode) throws IllegalPathException {
Iterator nodes = traverseContext.descendingIterator();
- StringBuilder builder = new StringBuilder(nodes.next().getName());
+ StringBuilder builder =
+ nodes.hasNext() ? new StringBuilder(nodes.next().getName()) : new StringBuilder();
while (nodes.hasNext()) {
builder.append(TsFileConstant.PATH_SEPARATOR);
builder.append(nodes.next().getName());
diff --git a/server/src/main/java/org/apache/iotdb/db/metadata/template/Template.java b/server/src/main/java/org/apache/iotdb/db/metadata/template/Template.java
index 74f474c3dcb3d..abcf3aa3ab7ae 100644
--- a/server/src/main/java/org/apache/iotdb/db/metadata/template/Template.java
+++ b/server/src/main/java/org/apache/iotdb/db/metadata/template/Template.java
@@ -421,6 +421,10 @@ public boolean unmarkStorageGroup(IMNode unsetNode) {
return relatedStorageGroup.removeAll(getSGPaths(unsetNode));
}
+ public boolean unmarkStorageGroups(Collection sgPaths) {
+ return relatedStorageGroup.removeAll(sgPaths);
+ }
+
// endregion
// region inner utils
diff --git a/server/src/main/java/org/apache/iotdb/db/metadata/utils/MetaFormatUtils.java b/server/src/main/java/org/apache/iotdb/db/metadata/utils/MetaFormatUtils.java
index ed37eddaa147b..8e1d5c48c8fa8 100644
--- a/server/src/main/java/org/apache/iotdb/db/metadata/utils/MetaFormatUtils.java
+++ b/server/src/main/java/org/apache/iotdb/db/metadata/utils/MetaFormatUtils.java
@@ -82,7 +82,7 @@ private static void checkReservedNames(String name) throws MetadataException {
private static void checkNameFormat(String name) throws MetadataException {
if (!((name.startsWith("'") && name.endsWith("'"))
|| (name.startsWith("\"") && name.endsWith("\"")))
- && name.contains(".")) {
+ && (name.contains(".") || name.contains("*"))) {
throw new MetadataException(String.format("%s is an illegal name.", name));
}
}
diff --git a/server/src/main/java/org/apache/iotdb/db/metrics/micrometer/registry/IoTDBMeterRegistry.java b/server/src/main/java/org/apache/iotdb/db/metrics/micrometer/registry/IoTDBMeterRegistry.java
deleted file mode 100644
index fd511cf574f0e..0000000000000
--- a/server/src/main/java/org/apache/iotdb/db/metrics/micrometer/registry/IoTDBMeterRegistry.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.metrics.micrometer.registry;
-
-import org.apache.iotdb.db.conf.IoTDBConfig;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.exception.StorageEngineException;
-import org.apache.iotdb.db.exception.metadata.IllegalPathException;
-import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException;
-import org.apache.iotdb.db.exception.query.QueryProcessException;
-import org.apache.iotdb.db.metadata.path.PartialPath;
-import org.apache.iotdb.db.metrics.metricsUtils;
-import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan;
-import org.apache.iotdb.db.service.IoTDB;
-import org.apache.iotdb.db.service.basic.ServiceProvider;
-import org.apache.iotdb.db.utils.DataTypeUtils;
-import org.apache.iotdb.rpc.IoTDBConnectionException;
-import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
-
-import io.micrometer.core.instrument.Clock;
-import io.micrometer.core.instrument.Meter;
-import io.micrometer.core.instrument.Tag;
-import io.micrometer.core.instrument.distribution.HistogramSnapshot;
-import io.micrometer.core.instrument.step.StepMeterRegistry;
-import io.micrometer.core.instrument.step.StepRegistryConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-public class IoTDBMeterRegistry extends StepMeterRegistry {
- private static final Logger logger = LoggerFactory.getLogger(IoTDBMeterRegistry.class);
- private final ServiceProvider serviceProvider;
- private final int rpcPort;
- private final String address;
-
- public IoTDBMeterRegistry(StepRegistryConfig config, Clock clock) {
- super(config, clock);
- IoTDBConfig ioTDBConfig = IoTDBDescriptor.getInstance().getConfig();
- rpcPort = ioTDBConfig.getRpcPort();
- address = ioTDBConfig.getRpcAddress();
- serviceProvider = IoTDB.serviceProvider;
- }
-
- @Override
- protected void publish() {
- getMeters()
- .forEach(
- meter -> {
- Meter.Id id = meter.getId();
- String name = id.getName();
- List tags = id.getTags();
- Map labels = tagsConvertToMap(tags);
- meter.use(
- gauge -> {
- updateValue(name, labels, gauge.value());
- },
- counter -> {
- updateValue(name, labels, counter.count());
- },
- timer -> {
- writeSnapshotAndCount(name, labels, timer.takeSnapshot());
- },
- summary -> {
- writeSnapshotAndCount(name, labels, summary.takeSnapshot());
- },
- longTaskTimer -> {
- updateValue(name, labels, (double) longTaskTimer.activeTasks());
- },
- timeGauge -> {
- updateValue(name, labels, timeGauge.value(getBaseTimeUnit()));
- },
- functionCounter -> {
- updateValue(name, labels, functionCounter.count());
- },
- functionTimer -> {
- updateValue(name, labels, functionTimer.count());
- },
- m -> {
- logger.debug("unknown meter:" + meter);
- });
- });
- }
-
- private void writeSnapshotAndCount(
- String name, Map labels, HistogramSnapshot snapshot) {
- updateValue(name + "_max", labels, snapshot.max());
- updateValue(name + "_mean", labels, snapshot.mean());
- updateValue(name + "_total", labels, snapshot.total());
- updateValue(name + "_count", labels, (double) snapshot.count());
- }
-
- private Map tagsConvertToMap(List tags) {
- Map labels = new HashMap<>();
- for (Tag tag : tags) {
- labels.put(tag.getKey(), tag.getValue());
- }
- return labels;
- }
-
- private void updateValue(String name, Map labels, Double value) {
- if (value != null) {
- try {
- InsertRowPlan insertRowPlan =
- new InsertRowPlan(
- new PartialPath(metricsUtils.generatePath(address, rpcPort, name, labels)),
- System.currentTimeMillis(),
- new String[] {"value"},
- DataTypeUtils.getValueBuffer(
- new ArrayList<>(Arrays.asList(TSDataType.DOUBLE)),
- new ArrayList<>(Arrays.asList(value))),
- false);
- serviceProvider.executeNonQuery(insertRowPlan);
- } catch (IllegalPathException
- | IoTDBConnectionException
- | QueryProcessException
- | StorageGroupNotSetException
- | StorageEngineException e) {
- logger.error("illegal insertRowPlan,reason:" + e.getMessage());
- }
- }
- }
-
- @Override
- protected TimeUnit getBaseTimeUnit() {
- return TimeUnit.MILLISECONDS;
- }
-}
diff --git a/server/src/main/java/org/apache/iotdb/db/qp/executor/PlanExecutor.java b/server/src/main/java/org/apache/iotdb/db/qp/executor/PlanExecutor.java
index 3ed5854afff59..f57a473b1ea41 100644
--- a/server/src/main/java/org/apache/iotdb/db/qp/executor/PlanExecutor.java
+++ b/server/src/main/java/org/apache/iotdb/db/qp/executor/PlanExecutor.java
@@ -31,11 +31,13 @@
import org.apache.iotdb.db.conf.IoTDBConstant;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.engine.StorageEngine;
+import org.apache.iotdb.db.engine.cache.BloomFilterCache;
import org.apache.iotdb.db.engine.cache.ChunkCache;
import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
import org.apache.iotdb.db.engine.cq.ContinuousQueryService;
import org.apache.iotdb.db.engine.flush.pool.FlushTaskPoolManager;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+import org.apache.iotdb.db.engine.storagegroup.TsFileResourceStatus;
import org.apache.iotdb.db.engine.storagegroup.VirtualStorageGroupProcessor.TimePartitionFilter;
import org.apache.iotdb.db.engine.trigger.service.TriggerRegistrationService;
import org.apache.iotdb.db.exception.BatchProcessException;
@@ -157,6 +159,7 @@
import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.iotdb.tsfile.read.TsFileCheckStatus;
import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
import org.apache.iotdb.tsfile.read.common.Field;
import org.apache.iotdb.tsfile.read.common.Path;
@@ -508,9 +511,10 @@ private void operateMerge() throws StorageEngineException {
StorageEngine.getInstance().mergeAll();
}
- private void operateClearCache() {
+ public static void operateClearCache() {
ChunkCache.getInstance().clear();
TimeSeriesMetadataCache.getInstance().clear();
+ BloomFilterCache.getInstance().clear();
}
private void operateCreateSnapshot() {
@@ -1269,7 +1273,7 @@ private void loadFile(File file, OperateFilePlan plan) throws QueryProcessExcept
return;
}
TsFileResource tsFileResource = new TsFileResource(file);
- tsFileResource.setClosed(true);
+ tsFileResource.setStatus(TsFileResourceStatus.CLOSED);
try {
// check file
RestorableTsFileIOWriter restorableTsFileIOWriter = new RestorableTsFileIOWriter(file);
@@ -1283,7 +1287,12 @@ private void loadFile(File file, OperateFilePlan plan) throws QueryProcessExcept
List chunkGroupMetadataList = new ArrayList<>();
try (TsFileSequenceReader reader = new TsFileSequenceReader(file.getAbsolutePath(), false)) {
- reader.selfCheck(schemaMap, chunkGroupMetadataList, false);
+ if (reader.selfCheck(schemaMap, chunkGroupMetadataList, false)
+ != TsFileCheckStatus.COMPLETE_FILE) {
+ throw new QueryProcessException(
+ String.format(
+ "Cannot load file %s because the file has crashed.", file.getAbsolutePath()));
+ }
if (plan.getVerifyMetadata()) {
loadNewTsFileVerifyMetadata(reader);
}
diff --git a/server/src/main/java/org/apache/iotdb/db/qp/logical/crud/SelectIntoOperator.java b/server/src/main/java/org/apache/iotdb/db/qp/logical/crud/SelectIntoOperator.java
index 09231c961ad0b..555bf0bebb3ef 100644
--- a/server/src/main/java/org/apache/iotdb/db/qp/logical/crud/SelectIntoOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/qp/logical/crud/SelectIntoOperator.java
@@ -67,6 +67,9 @@ public void check() throws LogicalOperatorException {
"select into: target paths in into clause should be different.");
}
+ checkWildcardsInPartialPaths(intoPaths);
+ checkWildcardsInPartialPaths(queryOperator.getFromComponent().getPrefixPaths());
+
if (queryOperator.isAlignByDevice()) {
throw new LogicalOperatorException("select into: align by device clauses are not supported.");
}
@@ -100,6 +103,18 @@ public void check() throws LogicalOperatorException {
}
}
+ private void checkWildcardsInPartialPaths(List paths)
+ throws LogicalOperatorException {
+ for (PartialPath path : paths) {
+ for (String node : path.getNodes()) {
+ if ("*".equals(node) || "**".equals(node)) {
+ throw new LogicalOperatorException(
+ "select into: * and ** are not allowed in a target path.");
+ }
+ }
+ }
+ }
+
public void setQueryOperator(QueryOperator queryOperator) {
this.queryOperator = queryOperator;
}
diff --git a/server/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertRowPlan.java b/server/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertRowPlan.java
index ee263cca21890..85693d2879e47 100644
--- a/server/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertRowPlan.java
+++ b/server/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertRowPlan.java
@@ -342,8 +342,7 @@ public void subSerialize(DataOutputStream stream) throws IOException {
}
void serializeMeasurementsAndValues(DataOutputStream stream) throws IOException {
- stream.writeInt(
- measurements.length - (failedMeasurements == null ? 0 : failedMeasurements.size()));
+ stream.writeInt(measurements.length - getFailedMeasurementNumber());
for (String m : measurements) {
if (m != null) {
@@ -352,7 +351,7 @@ void serializeMeasurementsAndValues(DataOutputStream stream) throws IOException
}
try {
- stream.writeInt(dataTypes.length);
+ stream.writeInt(values.length - getFailedMeasurementNumber());
putValues(stream);
} catch (QueryProcessException e) {
throw new IOException(e);
@@ -368,7 +367,6 @@ void serializeMeasurementsAndValues(DataOutputStream stream) throws IOException
private void putValues(DataOutputStream outputStream) throws QueryProcessException, IOException {
for (int i = 0; i < values.length; i++) {
if (values[i] == null) {
- ReadWriteIOUtils.write(TYPE_NULL, outputStream);
continue;
}
// types are not determined, the situation mainly occurs when the plan uses string values
@@ -407,7 +405,6 @@ private void putValues(DataOutputStream outputStream) throws QueryProcessExcepti
private void putValues(ByteBuffer buffer) throws QueryProcessException {
for (int i = 0; i < values.length; i++) {
if (values[i] == null) {
- ReadWriteIOUtils.write(TYPE_NULL, buffer);
continue;
}
// types are not determined, the situation mainly occurs when the plan uses string values
@@ -493,8 +490,7 @@ public void subSerialize(ByteBuffer buffer) {
}
void serializeMeasurementsAndValues(ByteBuffer buffer) {
- buffer.putInt(
- measurements.length - (failedMeasurements == null ? 0 : failedMeasurements.size()));
+ buffer.putInt(measurements.length - getFailedMeasurementNumber());
for (String measurement : measurements) {
if (measurement != null) {
@@ -502,7 +498,7 @@ void serializeMeasurementsAndValues(ByteBuffer buffer) {
}
}
try {
- buffer.putInt(dataTypes.length);
+ buffer.putInt(values.length - getFailedMeasurementNumber());
putValues(buffer);
} catch (QueryProcessException e) {
logger.error("Failed to serialize values for {}", this, e);
diff --git a/server/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertTabletPlan.java b/server/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertTabletPlan.java
index d7615061956b1..30ec3ebfb4a7f 100644
--- a/server/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertTabletPlan.java
+++ b/server/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertTabletPlan.java
@@ -51,11 +51,9 @@ public class InsertTabletPlan extends InsertPlan {
private static final String DATATYPE_UNSUPPORTED = "Data type %s is not supported.";
private long[] times; // times should be sorted. It is done in the session API.
- private ByteBuffer timeBuffer;
private BitMap[] bitMaps;
private Object[] columns;
- private ByteBuffer valueBuffer;
private int rowCount = 0;
// indicate whether this plan has been set 'start' or 'end' in order to support plan transmission
// without data loss in cluster version
@@ -170,8 +168,7 @@ public void subSerialize(DataOutputStream stream) throws IOException {
}
private void writeMeasurements(DataOutputStream stream) throws IOException {
- stream.writeInt(
- measurements.length - (failedMeasurements == null ? 0 : failedMeasurements.size()));
+ stream.writeInt(measurements.length - getFailedMeasurementNumber());
for (String m : measurements) {
if (m == null) {
continue;
@@ -181,13 +178,12 @@ private void writeMeasurements(DataOutputStream stream) throws IOException {
}
private void writeDataTypes(DataOutputStream stream) throws IOException {
- stream.writeInt(dataTypes.length);
+ stream.writeInt(dataTypes.length - getFailedMeasurementNumber());
for (int i = 0; i < dataTypes.length; i++) {
if (columns[i] == null) {
continue;
}
- TSDataType dataType = dataTypes[i];
- stream.write(dataType.serialize());
+ dataTypes[i].serializeTo(stream);
}
}
@@ -198,37 +194,36 @@ private void writeTimes(DataOutputStream stream) throws IOException {
stream.writeInt(rowCount);
}
- if (timeBuffer == null) {
- if (isExecuting) {
- for (int i = start; i < end; i++) {
- stream.writeLong(times[i]);
- }
- } else {
- for (long time : times) {
- stream.writeLong(time);
- }
+ if (isExecuting) {
+ for (int i = start; i < end; i++) {
+ stream.writeLong(times[i]);
}
} else {
- stream.write(timeBuffer.array());
- timeBuffer = null;
+ for (long time : times) {
+ stream.writeLong(time);
+ }
}
}
private void writeBitMaps(DataOutputStream stream) throws IOException {
stream.writeBoolean(bitMaps != null);
if (bitMaps != null) {
- for (BitMap bitMap : bitMaps) {
- if (bitMap == null) {
+ for (int i = 0; i < bitMaps.length; ++i) {
+ if (columns[i] == null) {
+ continue;
+ }
+
+ if (bitMaps[i] == null) {
stream.writeBoolean(false);
} else {
stream.writeBoolean(true);
if (isExecuting) {
int len = end - start;
BitMap partBitMap = new BitMap(len);
- BitMap.copyOfRange(bitMap, start, partBitMap, 0, len);
+ BitMap.copyOfRange(bitMaps[i], start, partBitMap, 0, len);
stream.write(partBitMap.getByteArray());
} else {
- stream.write(bitMap.getByteArray());
+ stream.write(bitMaps[i].getByteArray());
}
}
}
@@ -236,13 +231,7 @@ private void writeBitMaps(DataOutputStream stream) throws IOException {
}
private void writeValues(DataOutputStream stream) throws IOException {
- if (valueBuffer == null) {
- serializeValues(stream);
- } else {
- stream.write(valueBuffer.array());
- valueBuffer = null;
- }
-
+ serializeValues(stream);
stream.writeLong(index);
}
@@ -264,8 +253,7 @@ public void subSerialize(ByteBuffer buffer) {
}
private void writeMeasurements(ByteBuffer buffer) {
- buffer.putInt(
- measurements.length - (failedMeasurements == null ? 0 : failedMeasurements.size()));
+ buffer.putInt(measurements.length - getFailedMeasurementNumber());
for (String m : measurements) {
if (m != null) {
putString(buffer, m);
@@ -274,13 +262,12 @@ private void writeMeasurements(ByteBuffer buffer) {
}
private void writeDataTypes(ByteBuffer buffer) {
- buffer.putInt(dataTypes.length - (failedMeasurements == null ? 0 : failedMeasurements.size()));
- for (int i = 0, dataTypesLength = dataTypes.length; i < dataTypesLength; i++) {
- TSDataType dataType = dataTypes[i];
+ buffer.putInt(dataTypes.length - getFailedMeasurementNumber());
+ for (int i = 0; i < dataTypes.length; i++) {
if (columns[i] == null) {
continue;
}
- dataType.serializeTo(buffer);
+ dataTypes[i].serializeTo(buffer);
}
}
@@ -291,37 +278,36 @@ private void writeTimes(ByteBuffer buffer) {
buffer.putInt(rowCount);
}
- if (timeBuffer == null) {
- if (isExecuting) {
- for (int i = start; i < end; i++) {
- buffer.putLong(times[i]);
- }
- } else {
- for (long time : times) {
- buffer.putLong(time);
- }
+ if (isExecuting) {
+ for (int i = start; i < end; i++) {
+ buffer.putLong(times[i]);
}
} else {
- buffer.put(timeBuffer.array());
- timeBuffer = null;
+ for (long time : times) {
+ buffer.putLong(time);
+ }
}
}
private void writeBitMaps(ByteBuffer buffer) {
buffer.put(BytesUtils.boolToByte(bitMaps != null));
if (bitMaps != null) {
- for (BitMap bitMap : bitMaps) {
- if (bitMap == null) {
+ for (int i = 0; i < bitMaps.length; i++) {
+ if (columns[i] == null) {
+ continue;
+ }
+
+ if (bitMaps[i] == null) {
buffer.put(BytesUtils.boolToByte(false));
} else {
buffer.put(BytesUtils.boolToByte(true));
if (isExecuting) {
int len = end - start;
BitMap partBitMap = new BitMap(len);
- BitMap.copyOfRange(bitMap, start, partBitMap, 0, len);
+ BitMap.copyOfRange(bitMaps[i], start, partBitMap, 0, len);
buffer.put(partBitMap.getByteArray());
} else {
- buffer.put(bitMap.getByteArray());
+ buffer.put(bitMaps[i].getByteArray());
}
}
}
@@ -329,18 +315,12 @@ private void writeBitMaps(ByteBuffer buffer) {
}
private void writeValues(ByteBuffer buffer) {
- if (valueBuffer == null) {
- serializeValues(buffer);
- } else {
- buffer.put(valueBuffer.array());
- valueBuffer = null;
- }
-
+ serializeValues(buffer);
buffer.putLong(index);
}
private void serializeValues(DataOutputStream outputStream) throws IOException {
- for (int i = 0; i < dataTypes.length; i++) {
+ for (int i = 0; i < columns.length; i++) {
if (columns[i] == null) {
continue;
}
@@ -349,7 +329,7 @@ private void serializeValues(DataOutputStream outputStream) throws IOException {
}
private void serializeValues(ByteBuffer buffer) {
- for (int i = 0; i < dataTypes.length; i++) {
+ for (int i = 0; i < columns.length; i++) {
if (columns[i] == null) {
continue;
}
@@ -452,16 +432,6 @@ private void serializeColumn(
}
}
- public void setTimeBuffer(ByteBuffer timeBuffer) {
- this.timeBuffer = timeBuffer;
- this.timeBuffer.position(0);
- }
-
- public void setValueBuffer(ByteBuffer valueBuffer) {
- this.valueBuffer = valueBuffer;
- this.timeBuffer.position(0);
- }
-
@Override
public void deserialize(ByteBuffer buffer) throws IllegalPathException {
this.devicePath = new PartialPath(readString(buffer));
@@ -649,8 +619,6 @@ public boolean equals(Object o) {
return rowCount == that.rowCount
&& Objects.equals(devicePath, that.devicePath)
&& Arrays.equals(times, that.times)
- && Objects.equals(timeBuffer, that.timeBuffer)
- && Objects.equals(valueBuffer, that.valueBuffer)
&& Objects.equals(paths, that.paths)
&& Objects.equals(range, that.range)
&& Objects.equals(isAligned, that.isAligned);
@@ -658,7 +626,7 @@ public boolean equals(Object o) {
@Override
public int hashCode() {
- int result = Objects.hash(timeBuffer, valueBuffer, rowCount, paths, range);
+ int result = Objects.hash(rowCount, paths, range);
result = 31 * result + Arrays.hashCode(times);
return result;
}
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/AggregateResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/AggregateResult.java
index 7daa1e8d60bdb..8719a91dee1e9 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/aggregation/AggregateResult.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/AggregateResult.java
@@ -33,6 +33,7 @@
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
+import java.util.function.Predicate;
public abstract class AggregateResult {
@@ -84,11 +85,11 @@ public abstract void updateResultFromPageData(IBatchDataIterator batchIterator)
* This method is used in GROUP BY aggregation query.
*
* @param batchIterator the data in Page
- * @param minBound calculate points whose time >= bound
- * @param maxBound calculate points whose time < bound
+ * @param boundPredicate used to judge whether the current timestamp is out of time range, returns
+ * true if it is.
*/
public abstract void updateResultFromPageData(
- IBatchDataIterator batchIterator, long minBound, long maxBound) throws IOException;
+ IBatchDataIterator batchIterator, Predicate boundPredicate) throws IOException;
/**
* This method calculates the aggregation using common timestamps of the cross series filter.
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/AvgAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/AvgAggrResult.java
index 49fc5efa46d99..7046559cf4d51 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/AvgAggrResult.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/AvgAggrResult.java
@@ -35,6 +35,7 @@
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
+import java.util.function.Predicate;
public class AvgAggrResult extends AggregateResult {
@@ -84,16 +85,14 @@ public void updateResultFromStatistics(Statistics statistics) {
@Override
public void updateResultFromPageData(IBatchDataIterator batchIterator) {
- updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+ updateResultFromPageData(batchIterator, time -> false);
}
@Override
public void updateResultFromPageData(
- IBatchDataIterator batchIterator, long minBound, long maxBound) {
- while (batchIterator.hasNext(minBound, maxBound)) {
- if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
- break;
- }
+ IBatchDataIterator batchIterator, Predicate boundPredicate) {
+ while (batchIterator.hasNext(boundPredicate)
+ && !boundPredicate.test(batchIterator.currentTime())) {
updateAvg(seriesDataType, batchIterator.currentValue());
batchIterator.next();
}
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/CountAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/CountAggrResult.java
index ec279729f9057..d1745e2d79205 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/CountAggrResult.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/CountAggrResult.java
@@ -30,6 +30,7 @@
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
+import java.util.function.Predicate;
public class CountAggrResult extends AggregateResult {
@@ -56,12 +57,10 @@ public void updateResultFromPageData(IBatchDataIterator batchIterator) {
@Override
public void updateResultFromPageData(
- IBatchDataIterator batchIterator, long minBound, long maxBound) {
+ IBatchDataIterator batchIterator, Predicate boundPredicate) {
int cnt = 0;
- while (batchIterator.hasNext(minBound, maxBound)) {
- if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
- break;
- }
+ while (batchIterator.hasNext(boundPredicate)
+ && !boundPredicate.test(batchIterator.currentTime())) {
cnt++;
batchIterator.next();
}
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/ExtremeAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/ExtremeAggrResult.java
index 8b40461331d70..3fa84bad55622 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/ExtremeAggrResult.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/ExtremeAggrResult.java
@@ -32,6 +32,7 @@
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
+import java.util.function.Predicate;
public class ExtremeAggrResult extends AggregateResult {
@@ -95,18 +96,15 @@ public void updateResultFromStatistics(Statistics statistics) {
@Override
public void updateResultFromPageData(IBatchDataIterator batchIterator) {
- updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+ updateResultFromPageData(batchIterator, time -> false);
}
@Override
public void updateResultFromPageData(
- IBatchDataIterator batchIterator, long minBound, long maxBound) {
+ IBatchDataIterator batchIterator, Predicate boundPredicate) {
Comparable
-
- org.apache.iotdb
- iotdb-server
- ${project.version}
- test-jar
- test
-
-
- org.apache.iotdb
- iotdb-server
- ${project.version}
- test
- org.apache.iotdbiotdb-jdbc
diff --git a/session/src/main/java/org/apache/iotdb/session/Session.java b/session/src/main/java/org/apache/iotdb/session/Session.java
index 9a31211403f5f..8afc98b360cc6 100644
--- a/session/src/main/java/org/apache/iotdb/session/Session.java
+++ b/session/src/main/java/org/apache/iotdb/session/Session.java
@@ -38,11 +38,13 @@
import org.apache.iotdb.service.rpc.thrift.TSInsertStringRecordsReq;
import org.apache.iotdb.service.rpc.thrift.TSInsertTabletReq;
import org.apache.iotdb.service.rpc.thrift.TSInsertTabletsReq;
+import org.apache.iotdb.service.rpc.thrift.TSOperationSyncWriteReq;
import org.apache.iotdb.service.rpc.thrift.TSProtocolVersion;
import org.apache.iotdb.service.rpc.thrift.TSPruneSchemaTemplateReq;
import org.apache.iotdb.service.rpc.thrift.TSQueryTemplateReq;
import org.apache.iotdb.service.rpc.thrift.TSQueryTemplateResp;
import org.apache.iotdb.service.rpc.thrift.TSSetSchemaTemplateReq;
+import org.apache.iotdb.service.rpc.thrift.TSSetUsingTemplateReq;
import org.apache.iotdb.service.rpc.thrift.TSUnsetSchemaTemplateReq;
import org.apache.iotdb.session.template.MeasurementNode;
import org.apache.iotdb.session.template.Template;
@@ -1022,8 +1024,11 @@ public void insertRecords(
genTSInsertStringRecordsReq(deviceIds, times, measurementsList, valuesList, false);
try {
defaultSessionConnection.insertRecords(request);
- } catch (RedirectException ignored) {
- // ignore
+ } catch (RedirectException e) {
+ Map deviceEndPointMap = e.getDeviceEndPointMap();
+ for (Map.Entry deviceEndPointEntry : deviceEndPointMap.entrySet()) {
+ handleRedirection(deviceEndPointEntry.getKey(), deviceEndPointEntry.getValue());
+ }
}
}
}
@@ -1056,8 +1061,11 @@ public void insertAlignedRecords(
genTSInsertStringRecordsReq(deviceIds, times, measurementsList, valuesList, true);
try {
defaultSessionConnection.insertRecords(request);
- } catch (RedirectException ignored) {
- // ignore
+ } catch (RedirectException e) {
+ Map deviceEndPointMap = e.getDeviceEndPointMap();
+ for (Map.Entry deviceEndPointEntry : deviceEndPointMap.entrySet()) {
+ handleRedirection(deviceEndPointEntry.getKey(), deviceEndPointEntry.getValue());
+ }
}
}
}
@@ -1139,8 +1147,11 @@ public void insertRecords(
genTSInsertRecordsReq(deviceIds, times, measurementsList, typesList, valuesList, false);
try {
defaultSessionConnection.insertRecords(request);
- } catch (RedirectException ignored) {
- // ignore
+ } catch (RedirectException e) {
+ Map deviceEndPointMap = e.getDeviceEndPointMap();
+ for (Map.Entry deviceEndPointEntry : deviceEndPointMap.entrySet()) {
+ handleRedirection(deviceEndPointEntry.getKey(), deviceEndPointEntry.getValue());
+ }
}
}
}
@@ -1174,8 +1185,11 @@ public void insertAlignedRecords(
genTSInsertRecordsReq(deviceIds, times, measurementsList, typesList, valuesList, true);
try {
defaultSessionConnection.insertRecords(request);
- } catch (RedirectException ignored) {
- // ignore
+ } catch (RedirectException e) {
+ Map deviceEndPointMap = e.getDeviceEndPointMap();
+ for (Map.Entry deviceEndPointEntry : deviceEndPointMap.entrySet()) {
+ handleRedirection(deviceEndPointEntry.getKey(), deviceEndPointEntry.getValue());
+ }
}
}
}
@@ -1657,8 +1671,11 @@ public void insertTablets(Map tablets, boolean sorted)
genTSInsertTabletsReq(new ArrayList<>(tablets.values()), sorted, false);
try {
defaultSessionConnection.insertTablets(request);
- } catch (RedirectException ignored) {
- // ignored
+ } catch (RedirectException e) {
+ Map deviceEndPointMap = e.getDeviceEndPointMap();
+ for (Map.Entry deviceEndPointEntry : deviceEndPointMap.entrySet()) {
+ handleRedirection(deviceEndPointEntry.getKey(), deviceEndPointEntry.getValue());
+ }
}
}
}
@@ -1692,8 +1709,11 @@ public void insertAlignedTablets(Map tablets, boolean sorted)
genTSInsertTabletsReq(new ArrayList<>(tablets.values()), sorted, true);
try {
defaultSessionConnection.insertTablets(request);
- } catch (RedirectException ignored) {
- // ignored
+ } catch (RedirectException e) {
+ Map deviceEndPointMap = e.getDeviceEndPointMap();
+ for (Map.Entry deviceEndPointEntry : deviceEndPointMap.entrySet()) {
+ handleRedirection(deviceEndPointEntry.getKey(), deviceEndPointEntry.getValue());
+ }
}
}
}
@@ -2349,6 +2369,14 @@ public void unsetSchemaTemplate(String prefixPath, String templateName)
defaultSessionConnection.unsetSchemaTemplate(request);
}
+ /** Set designated path using template, act like the sql-statement with same name and syntax. */
+ public void createTimeseriesOfTemplateOnPath(String path)
+ throws IoTDBConnectionException, StatementExecutionException {
+ TSSetUsingTemplateReq request = new TSSetUsingTemplateReq();
+ request.setDstPath(path);
+ defaultSessionConnection.setUsingTemplate(request);
+ }
+
public void dropSchemaTemplate(String templateName)
throws IoTDBConnectionException, StatementExecutionException {
TSDropSchemaTemplateReq request = getTSDropSchemaTemplateReq(templateName);
@@ -2442,6 +2470,24 @@ private void insertByGroup(
}
}
+ /** Transmit insert record request for operation sync */
+ public void operationSyncTransmit(ByteBuffer buffer)
+ throws IoTDBConnectionException, StatementExecutionException {
+ try {
+ TSOperationSyncWriteReq request = genTSExecuteOperationSyncReq(buffer);
+ defaultSessionConnection.executeOperationSync(request);
+ } catch (RedirectException e) {
+ // ignored
+ }
+ }
+
+ private TSOperationSyncWriteReq genTSExecuteOperationSyncReq(ByteBuffer buffer) {
+ TSOperationSyncWriteReq request = new TSOperationSyncWriteReq();
+ request.setOperationSyncType((byte) 0);
+ request.setPhysicalPlan(buffer);
+ return request;
+ }
+
public boolean isEnableQueryRedirection() {
return enableQueryRedirection;
}
@@ -2470,7 +2516,7 @@ public static class Builder {
private boolean enableCacheLeader = Config.DEFAULT_CACHE_LEADER_MODE;
private Version version = Config.DEFAULT_VERSION;
- List nodeUrls = null;
+ private List nodeUrls = null;
public Builder host(String host) {
this.host = host;
@@ -2535,16 +2581,19 @@ public Session build() {
}
if (nodeUrls != null) {
- return new Session(
- nodeUrls,
- username,
- password,
- fetchSize,
- zoneId,
- thriftDefaultBufferSize,
- thriftMaxFrameSize,
- enableCacheLeader,
- version);
+ Session newSession =
+ new Session(
+ nodeUrls,
+ username,
+ password,
+ fetchSize,
+ zoneId,
+ thriftDefaultBufferSize,
+ thriftMaxFrameSize,
+ enableCacheLeader,
+ version);
+ newSession.setEnableQueryRedirection(true);
+ return newSession;
}
return new Session(
diff --git a/session/src/main/java/org/apache/iotdb/session/SessionConnection.java b/session/src/main/java/org/apache/iotdb/session/SessionConnection.java
index 930f5baaa178a..6635b9d013eb6 100644
--- a/session/src/main/java/org/apache/iotdb/session/SessionConnection.java
+++ b/session/src/main/java/org/apache/iotdb/session/SessionConnection.java
@@ -47,12 +47,14 @@
import org.apache.iotdb.service.rpc.thrift.TSLastDataQueryReq;
import org.apache.iotdb.service.rpc.thrift.TSOpenSessionReq;
import org.apache.iotdb.service.rpc.thrift.TSOpenSessionResp;
+import org.apache.iotdb.service.rpc.thrift.TSOperationSyncWriteReq;
import org.apache.iotdb.service.rpc.thrift.TSPruneSchemaTemplateReq;
import org.apache.iotdb.service.rpc.thrift.TSQueryTemplateReq;
import org.apache.iotdb.service.rpc.thrift.TSQueryTemplateResp;
import org.apache.iotdb.service.rpc.thrift.TSRawDataQueryReq;
import org.apache.iotdb.service.rpc.thrift.TSSetSchemaTemplateReq;
import org.apache.iotdb.service.rpc.thrift.TSSetTimeZoneReq;
+import org.apache.iotdb.service.rpc.thrift.TSSetUsingTemplateReq;
import org.apache.iotdb.service.rpc.thrift.TSStatus;
import org.apache.iotdb.service.rpc.thrift.TSUnsetSchemaTemplateReq;
import org.apache.iotdb.session.util.SessionUtils;
@@ -923,6 +925,25 @@ protected void unsetSchemaTemplate(TSUnsetSchemaTemplateReq request)
}
}
+ protected void setUsingTemplate(TSSetUsingTemplateReq request)
+ throws IoTDBConnectionException, StatementExecutionException {
+ request.setSessionId(sessionId);
+ try {
+ RpcUtils.verifySuccess(client.setUsingTemplate(request));
+ } catch (TException e) {
+ if (reconnect()) {
+ try {
+ request.setSessionId(sessionId);
+ RpcUtils.verifySuccess(client.setUsingTemplate(request));
+ } catch (TException tException) {
+ throw new IoTDBConnectionException(tException);
+ }
+ } else {
+ throw new IoTDBConnectionException(MSG_RECONNECTION_FAIL);
+ }
+ }
+ }
+
protected void dropSchemaTemplate(TSDropSchemaTemplateReq request)
throws IoTDBConnectionException, StatementExecutionException {
request.setSessionId(sessionId);
@@ -942,6 +963,25 @@ protected void dropSchemaTemplate(TSDropSchemaTemplateReq request)
}
}
+ protected void executeOperationSync(TSOperationSyncWriteReq request)
+ throws IoTDBConnectionException, StatementExecutionException, RedirectException {
+ request.setSessionId(sessionId);
+ try {
+ RpcUtils.verifySuccessWithRedirection(client.executeOperationSync(request));
+ } catch (TException e) {
+ if (reconnect()) {
+ try {
+ request.setSessionId(sessionId);
+ RpcUtils.verifySuccess(client.executeOperationSync(request));
+ } catch (TException tException) {
+ throw new IoTDBConnectionException(tException);
+ }
+ } else {
+ throw new IoTDBConnectionException(MSG_RECONNECTION_FAIL);
+ }
+ }
+ }
+
public boolean isEnableRedirect() {
return enableRedirect;
}
diff --git a/session/src/main/java/org/apache/iotdb/session/pool/SessionPool.java b/session/src/main/java/org/apache/iotdb/session/pool/SessionPool.java
index 900f4185e4497..841581f6a2ee2 100644
--- a/session/src/main/java/org/apache/iotdb/session/pool/SessionPool.java
+++ b/session/src/main/java/org/apache/iotdb/session/pool/SessionPool.java
@@ -33,6 +33,7 @@
import org.slf4j.LoggerFactory;
import java.io.IOException;
+import java.nio.ByteBuffer;
import java.time.ZoneId;
import java.util.List;
import java.util.Map;
@@ -93,6 +94,9 @@ public class SessionPool {
// whether the queue is closed.
private boolean closed;
+ // Redirect-able SessionPool
+ private final List nodeUrls;
+
public SessionPool(String host, int port, String user, String password, int maxSize) {
this(
host,
@@ -108,6 +112,20 @@ public SessionPool(String host, int port, String user, String password, int maxS
Config.DEFAULT_CONNECTION_TIMEOUT_MS);
}
+ public SessionPool(List nodeUrls, String user, String password, int maxSize) {
+ this(
+ nodeUrls,
+ user,
+ password,
+ maxSize,
+ Config.DEFAULT_FETCH_SIZE,
+ 60_000,
+ false,
+ null,
+ Config.DEFAULT_CACHE_LEADER_MODE,
+ Config.DEFAULT_CONNECTION_TIMEOUT_MS);
+ }
+
public SessionPool(
String host, int port, String user, String password, int maxSize, boolean enableCompression) {
this(
@@ -124,6 +142,21 @@ public SessionPool(
Config.DEFAULT_CONNECTION_TIMEOUT_MS);
}
+ public SessionPool(
+ List nodeUrls, String user, String password, int maxSize, boolean enableCompression) {
+ this(
+ nodeUrls,
+ user,
+ password,
+ maxSize,
+ Config.DEFAULT_FETCH_SIZE,
+ 60_000,
+ enableCompression,
+ null,
+ Config.DEFAULT_CACHE_LEADER_MODE,
+ Config.DEFAULT_CONNECTION_TIMEOUT_MS);
+ }
+
public SessionPool(
String host,
int port,
@@ -146,6 +179,26 @@ public SessionPool(
Config.DEFAULT_CONNECTION_TIMEOUT_MS);
}
+ public SessionPool(
+ List nodeUrls,
+ String user,
+ String password,
+ int maxSize,
+ boolean enableCompression,
+ boolean enableCacheLeader) {
+ this(
+ nodeUrls,
+ user,
+ password,
+ maxSize,
+ Config.DEFAULT_FETCH_SIZE,
+ 60_000,
+ enableCompression,
+ null,
+ enableCacheLeader,
+ Config.DEFAULT_CONNECTION_TIMEOUT_MS);
+ }
+
public SessionPool(
String host, int port, String user, String password, int maxSize, ZoneId zoneId) {
this(
@@ -162,6 +215,21 @@ public SessionPool(
Config.DEFAULT_CONNECTION_TIMEOUT_MS);
}
+ public SessionPool(
+ List nodeUrls, String user, String password, int maxSize, ZoneId zoneId) {
+ this(
+ nodeUrls,
+ user,
+ password,
+ maxSize,
+ Config.DEFAULT_FETCH_SIZE,
+ 60_000,
+ false,
+ zoneId,
+ Config.DEFAULT_CACHE_LEADER_MODE,
+ Config.DEFAULT_CONNECTION_TIMEOUT_MS);
+ }
+
@SuppressWarnings("squid:S107")
public SessionPool(
String host,
@@ -178,6 +246,7 @@ public SessionPool(
this.maxSize = maxSize;
this.host = host;
this.port = port;
+ this.nodeUrls = null;
this.user = user;
this.password = password;
this.fetchSize = fetchSize;
@@ -188,6 +257,60 @@ public SessionPool(
this.connectionTimeoutInMs = connectionTimeoutInMs;
}
+ public SessionPool(
+ List nodeUrls,
+ String user,
+ String password,
+ int maxSize,
+ int fetchSize,
+ long waitToGetSessionTimeoutInMs,
+ boolean enableCompression,
+ ZoneId zoneId,
+ boolean enableCacheLeader,
+ int connectionTimeoutInMs) {
+ this.maxSize = maxSize;
+ this.host = null;
+ this.port = -1;
+ this.nodeUrls = nodeUrls;
+ this.user = user;
+ this.password = password;
+ this.fetchSize = fetchSize;
+ this.waitToGetSessionTimeoutInMs = waitToGetSessionTimeoutInMs;
+ this.enableCompression = enableCompression;
+ this.zoneId = zoneId;
+ this.enableCacheLeader = enableCacheLeader;
+ this.connectionTimeoutInMs = connectionTimeoutInMs;
+ }
+
+ private Session constructNewSession() {
+ Session session;
+ if (nodeUrls == null) {
+ // Construct custom Session
+ session =
+ new Session.Builder()
+ .host(host)
+ .port(port)
+ .username(user)
+ .password(password)
+ .fetchSize(fetchSize)
+ .zoneId(zoneId)
+ .enableCacheLeader(enableCacheLeader)
+ .build();
+ } else {
+ // Construct redirect-able Session
+ session =
+ new Session.Builder()
+ .nodeUrls(nodeUrls)
+ .username(user)
+ .password(password)
+ .fetchSize(fetchSize)
+ .zoneId(zoneId)
+ .enableCacheLeader(enableCacheLeader)
+ .build();
+ }
+ return session;
+ }
+
// if this method throws an exception, either the server is broken, or the ip/port/user/password
// is incorrect.
@SuppressWarnings({"squid:S3776", "squid:S2446"}) // Suppress high Cognitive Complexity warning
@@ -254,9 +377,15 @@ private Session getSession() throws IoTDBConnectionException {
if (shouldCreate) {
// create a new one.
if (logger.isDebugEnabled()) {
- logger.debug("Create a new Session {}, {}, {}, {}", host, port, user, password);
+ if (nodeUrls == null) {
+ logger.debug("Create a new Session {}, {}, {}, {}", host, port, user, password);
+ } else {
+ logger.debug("Create a new redirect Session {}, {}, {}", nodeUrls, user, password);
+ }
}
- session = new Session(host, port, user, password, fetchSize, zoneId, enableCacheLeader);
+
+ session = constructNewSession();
+
try {
session.open(enableCompression, connectionTimeoutInMs);
// avoid someone has called close() the session pool
@@ -352,7 +481,7 @@ public void closeResultSet(SessionDataSetWrapper wrapper) {
@SuppressWarnings({"squid:S2446"})
private void tryConstructNewSession() {
- Session session = new Session(host, port, user, password, fetchSize, zoneId, enableCacheLeader);
+ Session session = constructNewSession();
try {
session.open(enableCompression, connectionTimeoutInMs);
// avoid someone has called close() the session pool
@@ -1999,6 +2128,24 @@ public void unsetSchemaTemplate(String prefixPath, String templateName)
}
}
+ public void createTimeseriesOfTemplateOnPath(String path)
+ throws IoTDBConnectionException, StatementExecutionException {
+ for (int i = 0; i < RETRY; i++) {
+ Session session = getSession();
+ try {
+ session.createTimeseriesOfTemplateOnPath(path);
+ putBack(session);
+ } catch (IoTDBConnectionException e) {
+ // TException means the connection is broken, remove it and get a new one.
+ logger.warn(String.format("create timeseries of template on [%s] failed", path), e);
+ cleanSessionAndMayThrowConnectionException(session, i, e);
+ } catch (StatementExecutionException | RuntimeException e) {
+ putBack(session);
+ throw e;
+ }
+ }
+ }
+
public void dropSchemaTemplate(String templateName)
throws StatementExecutionException, IoTDBConnectionException {
for (int i = 0; i < RETRY; i++) {
@@ -2129,6 +2276,28 @@ public SessionDataSetWrapper executeRawDataQuery(List paths, long startT
return null;
}
+ /** Transmit insert record request for OperationSync */
+ public boolean operationSyncTransmit(ByteBuffer buffer)
+ throws IoTDBConnectionException, StatementExecutionException {
+ for (int i = 0; i < RETRY; i++) {
+ Session session = getSession();
+ try {
+ buffer.position(0);
+ session.operationSyncTransmit(buffer);
+ putBack(session);
+ return true;
+ } catch (IoTDBConnectionException e) {
+ // TException means the connection is broken, remove it and get a new one.
+ cleanSessionAndMayThrowConnectionException(session, i, e);
+ } catch (StatementExecutionException | RuntimeException e) {
+ putBack(session);
+ throw e;
+ }
+ }
+
+ return false;
+ }
+
public int getMaxSize() {
return maxSize;
}
@@ -2177,6 +2346,7 @@ public static class Builder {
private String host = Config.DEFAULT_HOST;
private int port = Config.DEFAULT_PORT;
+ private List nodeUrls = null;
private int maxSize = Config.DEFAULT_SESSION_POOL_MAX_SIZE;
private String user = Config.DEFAULT_USER;
private String password = Config.DEFAULT_PASSWORD;
@@ -2197,6 +2367,11 @@ public Builder port(int port) {
return this;
}
+ public Builder nodeUrls(List nodeUrls) {
+ this.nodeUrls = nodeUrls;
+ return this;
+ }
+
public Builder maxSize(int maxSize) {
this.maxSize = maxSize;
return this;
@@ -2243,18 +2418,32 @@ public Builder connectionTimeoutInMs(int connectionTimeoutInMs) {
}
public SessionPool build() {
- return new SessionPool(
- host,
- port,
- user,
- password,
- maxSize,
- fetchSize,
- waitToGetSessionTimeoutInMs,
- enableCompression,
- zoneId,
- enableCacheLeader,
- connectionTimeoutInMs);
+ if (nodeUrls == null) {
+ return new SessionPool(
+ host,
+ port,
+ user,
+ password,
+ maxSize,
+ fetchSize,
+ waitToGetSessionTimeoutInMs,
+ enableCompression,
+ zoneId,
+ enableCacheLeader,
+ connectionTimeoutInMs);
+ } else {
+ return new SessionPool(
+ nodeUrls,
+ user,
+ password,
+ maxSize,
+ fetchSize,
+ waitToGetSessionTimeoutInMs,
+ enableCompression,
+ zoneId,
+ enableCacheLeader,
+ connectionTimeoutInMs);
+ }
}
}
}
diff --git a/site/pom.xml b/site/pom.xml
index 6bf6046d44192..a0b7a1f128fc1 100644
--- a/site/pom.xml
+++ b/site/pom.xml
@@ -23,7 +23,7 @@
iotdb-parentorg.apache.iotdb
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xml4.0.0
diff --git a/spark-iotdb-connector/pom.xml b/spark-iotdb-connector/pom.xml
index ac5c1af849b30..862bc51a198ee 100644
--- a/spark-iotdb-connector/pom.xml
+++ b/spark-iotdb-connector/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlspark-iotdb-connector
diff --git a/spark-tsfile/pom.xml b/spark-tsfile/pom.xml
index 644d2df560490..22d35dfdf75b7 100644
--- a/spark-tsfile/pom.xml
+++ b/spark-tsfile/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlspark-tsfile
diff --git a/testcontainer/pom.xml b/testcontainer/pom.xml
index f9868a6781a58..1e22d9dc47328 100644
--- a/testcontainer/pom.xml
+++ b/testcontainer/pom.xml
@@ -23,7 +23,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xml4.0.0
diff --git a/testcontainer/src/tool/parser.py b/testcontainer/src/tool/parser.py
index 96d50ac5dfbde..5cee0a85a05fd 100644
--- a/testcontainer/src/tool/parser.py
+++ b/testcontainer/src/tool/parser.py
@@ -20,7 +20,7 @@
import sys
import re
-pattern = re.compile(r'docker\-java\-stream\-+(\d+)')
+pattern = re.compile(r"docker\-java\-stream\-+(\d+)")
def getAllLogs(filename):
@@ -76,4 +76,4 @@ def output(nodes, i):
output(nodes, i)
count = count + 1
- print("find {} failed tests".format(count))
\ No newline at end of file
+ print("find {} failed tests".format(count))
diff --git a/thrift-cluster/pom.xml b/thrift-cluster/pom.xml
index c48219053bdd1..4bf8be1b556f3 100644
--- a/thrift-cluster/pom.xml
+++ b/thrift-cluster/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmliotdb-thrift-cluster
diff --git a/thrift-influxdb/pom.xml b/thrift-influxdb/pom.xml
index f2ec7296fc9df..9a072589ff16c 100644
--- a/thrift-influxdb/pom.xml
+++ b/thrift-influxdb/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmlinfluxdb-thrift
diff --git a/thrift-sync/pom.xml b/thrift-sync/pom.xml
index 66015e13a1999..63b8f7e1033f3 100644
--- a/thrift-sync/pom.xml
+++ b/thrift-sync/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmliotdb-thrift-sync
diff --git a/thrift/pom.xml b/thrift/pom.xml
index 89d014677ef0c..a00e55a937359 100644
--- a/thrift/pom.xml
+++ b/thrift/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmliotdb-thrift
diff --git a/thrift/rpc-changelist.md b/thrift/rpc-changelist.md
index 43570d8f52514..f3095478f1a9c 100644
--- a/thrift/rpc-changelist.md
+++ b/thrift/rpc-changelist.md
@@ -30,14 +30,15 @@ Last Updated on 2022.1.17 by Xin Zhao.
## 2. Add New
-| Latest Changes | Related Committers |
-|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------|
-| Add TSTracingInfo | Minghui Liu |
+| Latest Changes | Related Committers |
+|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------|
+| Add TSTracingInfo | Minghui Liu |
| Add structs and interfaces to append, prune, query and unset Schema Template (detail: TSAppendSchemaTemplateReq, TSPruneSchemaTemplateReq, TSQueryTemplateReq, TSQueryTemplateResp, TSUnsetSchemaTemplateReq, appendSchemaTemplate, pruneSchemaTemplate, querySchemaTemplate, unsetSchemaTemplate), and serializedTemplate in TSCreateSchemaTemplateReq | Xin Zhao |
-| Add struct TSInsertStringRecordsOfOneDeviceReq | Hang Zhang |
-| Add method TSStatus insertStringRecordsOfOneDevice(1:TSInsertStringRecordsOfOneDeviceReq req) | Hang Zhang |
-| Add TSDropSchemaTemplateReq, TSStatus dropSchemaTemplate | Xin Zhao |
+| Add struct TSInsertStringRecordsOfOneDeviceReq | Hang Zhang |
+| Add method TSStatus insertStringRecordsOfOneDevice(1:TSInsertStringRecordsOfOneDeviceReq req) | Hang Zhang |
+| Add TSDropSchemaTemplateReq, TSStatus dropSchemaTemplate | Xin Zhao |
| Add TSCreateAlignedTimeseriesReq | Haonan Hou |
+| Add TSOperationSyncWriteReq | Rongzhao Chen |
## 3. Update
diff --git a/thrift/src/main/thrift/rpc.thrift b/thrift/src/main/thrift/rpc.thrift
index 78ccce6db6e8f..cc38805c892d8 100644
--- a/thrift/src/main/thrift/rpc.thrift
+++ b/thrift/src/main/thrift/rpc.thrift
@@ -411,11 +411,22 @@ struct TSUnsetSchemaTemplateReq {
3: required string templateName
}
+struct TSSetUsingTemplateReq {
+ 1: required i64 sessionId
+ 2: required string dstPath
+}
+
struct TSDropSchemaTemplateReq {
1: required i64 sessionId
2: required string templateName
}
+struct TSOperationSyncWriteReq {
+ 1: required i64 sessionId
+ 2: required byte operationSyncType
+ 3: required binary physicalPlan
+}
+
service TSIService {
TSOpenSessionResp openSession(1:TSOpenSessionReq req);
@@ -505,5 +516,9 @@ service TSIService {
TSStatus unsetSchemaTemplate(1:TSUnsetSchemaTemplateReq req);
+ TSStatus setUsingTemplate(1:TSSetUsingTemplateReq req);
+
TSStatus dropSchemaTemplate(1:TSDropSchemaTemplateReq req);
+
+ TSStatus executeOperationSync(1:TSOperationSyncWriteReq req);
}
\ No newline at end of file
diff --git a/tsfile/pom.xml b/tsfile/pom.xml
index 10a39adbe7d50..8045cd6bf2e12 100644
--- a/tsfile/pom.xml
+++ b/tsfile/pom.xml
@@ -24,7 +24,7 @@
org.apache.iotdbiotdb-parent
- 0.13.0-SNAPSHOT
+ 0.13.1-SNAPSHOT../pom.xmltsfile
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/Statistics.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/Statistics.java
index a9fcec1b2a8f4..ba42bccea918f 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/Statistics.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/Statistics.java
@@ -155,16 +155,19 @@ public int serialize(OutputStream outputStream) throws IOException {
@SuppressWarnings("unchecked")
public void mergeStatistics(Statistics extends Serializable> stats) {
if (this.getClass() == stats.getClass()) {
- if (stats.startTime < this.startTime) {
- this.startTime = stats.startTime;
+ if (!stats.isEmpty) {
+ if (stats.startTime < this.startTime) {
+ this.startTime = stats.startTime;
+ }
+ if (stats.endTime > this.endTime) {
+ this.endTime = stats.endTime;
+ }
+
+ // must be sure no overlap between two statistics
+ this.count += stats.count;
+ mergeStatisticsValue((Statistics) stats);
+ isEmpty = false;
}
- if (stats.endTime > this.endTime) {
- this.endTime = stats.endTime;
- }
- // must be sure no overlap between two statistics
- this.count += stats.count;
- mergeStatisticsValue((Statistics) stats);
- isEmpty = false;
} else {
Class> thisClass = this.getClass();
Class> statsClass = stats.getClass();
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/TimeStatistics.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/TimeStatistics.java
index adc967ecc7051..33fcad15cb472 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/TimeStatistics.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/TimeStatistics.java
@@ -45,6 +45,20 @@ public int getStatsSize() {
return 0;
}
+ @Override
+ public void update(long time) {
+ super.update(time);
+ setEmpty(false);
+ }
+
+ @Override
+ public void update(long[] time, int batchSize) {
+ super.update(time, batchSize);
+ if (batchSize > 0) {
+ setEmpty(false);
+ }
+ }
+
@Override
public Long getMinValue() {
throw new StatisticsClassException(String.format(STATS_UNSUPPORTED_MSG, TIME, "min value"));
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileAlignedSeriesReaderIterator.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileAlignedSeriesReaderIterator.java
index 015166a682bd8..7236d3c99941d 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileAlignedSeriesReaderIterator.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileAlignedSeriesReaderIterator.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.tsfile.file.metadata.IChunkMetadata;
import org.apache.iotdb.tsfile.read.common.Chunk;
import org.apache.iotdb.tsfile.read.reader.chunk.AlignedChunkReader;
+import org.apache.iotdb.tsfile.utils.Pair;
import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema;
import java.io.IOException;
@@ -55,13 +56,14 @@ public boolean hasNext() {
return curIdx < alignedChunkMetadataList.size() - 1;
}
- public AlignedChunkReader nextReader() throws IOException {
+ public Pair nextReader() throws IOException {
AlignedChunkMetadata alignedChunkMetadata = alignedChunkMetadataList.get(++curIdx);
IChunkMetadata timeChunkMetadata = alignedChunkMetadata.getTimeChunkMetadata();
List valueChunkMetadataList = alignedChunkMetadata.getValueChunkMetadataList();
int schemaIdx = 0;
Chunk timeChunk = reader.readMemChunk((ChunkMetadata) timeChunkMetadata);
Chunk[] valueChunks = new Chunk[schemaList.size()];
+ long totalSize = 0;
for (IChunkMetadata valueChunkMetadata : valueChunkMetadataList) {
if (valueChunkMetadata == null) {
continue;
@@ -71,12 +73,14 @@ public AlignedChunkReader nextReader() throws IOException {
.equals(schemaList.get(schemaIdx).getMeasurementId())) {
schemaIdx++;
}
- valueChunks[schemaIdx++] = reader.readMemChunk((ChunkMetadata) valueChunkMetadata);
+ Chunk chunk = reader.readMemChunk((ChunkMetadata) valueChunkMetadata);
+ valueChunks[schemaIdx++] = chunk;
+ totalSize += chunk.getHeader().getSerializedSize() + chunk.getHeader().getDataSize();
}
AlignedChunkReader chunkReader =
new AlignedChunkReader(timeChunk, Arrays.asList(valueChunks), null);
- return chunkReader;
+ return new Pair<>(chunkReader, totalSize);
}
}
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java
index 88834f121b48c..75c01fdbd64bf 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java
@@ -67,7 +67,6 @@
import java.io.File;
import java.io.IOException;
import java.io.Serializable;
-import java.nio.BufferOverflowException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
@@ -273,7 +272,7 @@ public TsFileMetadata readFileMetadata() throws IOException {
tsFileMetaData =
TsFileMetadata.deserializeFrom(readData(fileMetadataPos, fileMetadataSize));
}
- } catch (BufferOverflowException e) {
+ } catch (Exception e) {
logger.error("Something error happened while reading file metadata of file {}", file);
throw e;
}
@@ -354,7 +353,7 @@ public TimeseriesMetadata readTimeseriesMetadata(Path path, boolean ignoreNotExi
if (!metadataIndexNode.getNodeType().equals(MetadataIndexNodeType.LEAF_MEASUREMENT)) {
try {
metadataIndexNode = MetadataIndexNode.deserializeFrom(buffer);
- } catch (BufferOverflowException e) {
+ } catch (Exception e) {
logger.error(METADATA_INDEX_NODE_DESERIALIZE_ERROR, file);
throw e;
}
@@ -369,7 +368,7 @@ public TimeseriesMetadata readTimeseriesMetadata(Path path, boolean ignoreNotExi
while (buffer.hasRemaining()) {
try {
timeseriesMetadataList.add(TimeseriesMetadata.deserializeFrom(buffer, true));
- } catch (BufferOverflowException e) {
+ } catch (Exception e) {
logger.error(
"Something error happened while deserializing TimeseriesMetadata of file {}", file);
throw e;
@@ -400,7 +399,7 @@ public ITimeSeriesMetadata readITimeseriesMetadata(Path path, boolean ignoreNotE
try {
// next layer MeasurementNode of the specific DeviceNode
metadataIndexNode = MetadataIndexNode.deserializeFrom(buffer);
- } catch (BufferOverflowException e) {
+ } catch (Exception e) {
logger.error(METADATA_INDEX_NODE_DESERIALIZE_ERROR, file);
throw e;
}
@@ -416,7 +415,7 @@ public ITimeSeriesMetadata readITimeseriesMetadata(Path path, boolean ignoreNotE
while (buffer.hasRemaining()) {
try {
timeseriesMetadataList.add(TimeseriesMetadata.deserializeFrom(buffer, true));
- } catch (BufferOverflowException e) {
+ } catch (Exception e) {
logger.error(
"Something error happened while deserializing TimeseriesMetadata of file {}", file);
throw e;
@@ -452,7 +451,7 @@ public List readTimeseriesMetadata(Path path, Set al
TimeseriesMetadata timeseriesMetadata;
try {
timeseriesMetadata = TimeseriesMetadata.deserializeFrom(buffer, true);
- } catch (BufferOverflowException e) {
+ } catch (Exception e) {
logger.error(
"Something error happened while deserializing TimeseriesMetadata of file {}", file);
throw e;
@@ -478,7 +477,7 @@ private Pair getLeafMetadataIndexPair(Path path) throw
if (!metadataIndexNode.getNodeType().equals(MetadataIndexNodeType.LEAF_MEASUREMENT)) {
try {
metadataIndexNode = MetadataIndexNode.deserializeFrom(buffer);
- } catch (BufferOverflowException e) {
+ } catch (Exception e) {
logger.error(METADATA_INDEX_NODE_DESERIALIZE_ERROR, file);
throw e;
}
@@ -510,7 +509,7 @@ public List readITimeseriesMetadata(String device, Set readITimeseriesMetadata(String device, Set getMetadataAndEndOffset(
return getMetadataAndEndOffset(
MetadataIndexNode.deserializeFrom(buffer), name, isDeviceLevel, exactSearch);
}
- } catch (BufferOverflowException e) {
+ } catch (Exception e) {
logger.error("Something error happened while deserializing MetadataIndex of file {}", file);
throw e;
}
@@ -1033,7 +1032,12 @@ public void readPlanIndex() throws IOException {
* @throws IOException io error
*/
public ChunkHeader readChunkHeader(byte chunkType) throws IOException {
- return ChunkHeader.deserializeFrom(tsFileInput.wrapAsInputStream(), chunkType);
+ try {
+ return ChunkHeader.deserializeFrom(tsFileInput.wrapAsInputStream(), chunkType);
+ } catch (Throwable t) {
+ logger.error("Exception happened while reading chunk header of {}", file, t);
+ throw t;
+ }
}
/**
@@ -1043,7 +1047,12 @@ public ChunkHeader readChunkHeader(byte chunkType) throws IOException {
* @param chunkHeaderSize the size of chunk's header
*/
private ChunkHeader readChunkHeader(long position, int chunkHeaderSize) throws IOException {
- return ChunkHeader.deserializeFrom(tsFileInput, position, chunkHeaderSize);
+ try {
+ return ChunkHeader.deserializeFrom(tsFileInput, position, chunkHeaderSize);
+ } catch (Throwable t) {
+ logger.error("Exception happened while reading chunk header of {}", file, t);
+ throw t;
+ }
}
/**
@@ -1054,7 +1063,12 @@ private ChunkHeader readChunkHeader(long position, int chunkHeaderSize) throws I
* @return the pages of this chunk
*/
private ByteBuffer readChunk(long position, int dataSize) throws IOException {
- return readData(position, dataSize);
+ try {
+ return readData(position, dataSize);
+ } catch (Throwable t) {
+ logger.error("Exception happened while reading chunk of {}", file, t);
+ throw t;
+ }
}
/**
@@ -1064,12 +1078,17 @@ private ByteBuffer readChunk(long position, int dataSize) throws IOException {
* @return -chunk
*/
public Chunk readMemChunk(ChunkMetadata metaData) throws IOException {
- int chunkHeadSize = ChunkHeader.getSerializedSize(metaData.getMeasurementUid());
- ChunkHeader header = readChunkHeader(metaData.getOffsetOfChunkHeader(), chunkHeadSize);
- ByteBuffer buffer =
- readChunk(
- metaData.getOffsetOfChunkHeader() + header.getSerializedSize(), header.getDataSize());
- return new Chunk(header, buffer, metaData.getDeleteIntervalList(), metaData.getStatistics());
+ try {
+ int chunkHeadSize = ChunkHeader.getSerializedSize(metaData.getMeasurementUid());
+ ChunkHeader header = readChunkHeader(metaData.getOffsetOfChunkHeader(), chunkHeadSize);
+ ByteBuffer buffer =
+ readChunk(
+ metaData.getOffsetOfChunkHeader() + header.getSerializedSize(), header.getDataSize());
+ return new Chunk(header, buffer, metaData.getDeleteIntervalList(), metaData.getStatistics());
+ } catch (Throwable t) {
+ logger.error("Exception happened while reading chunk of {}", file, t);
+ throw t;
+ }
}
/**
@@ -1095,7 +1114,12 @@ public Chunk readMemChunk(CachedChunkLoaderImpl.ChunkCacheKey chunkCacheKey) thr
* @param type given tsfile data type
*/
public PageHeader readPageHeader(TSDataType type, boolean hasStatistic) throws IOException {
- return PageHeader.deserializeFrom(tsFileInput.wrapAsInputStream(), type, hasStatistic);
+ try {
+ return PageHeader.deserializeFrom(tsFileInput.wrapAsInputStream(), type, hasStatistic);
+ } catch (Throwable t) {
+ logger.error("Exception happened while reading page header of {}", file, t);
+ throw t;
+ }
}
public long position() throws IOException {
@@ -1208,7 +1232,12 @@ protected ByteBuffer readData(long position, int totalSize) throws IOException {
* @return data that been read.
*/
protected ByteBuffer readData(long start, long end) throws IOException {
- return readData(start, (int) (end - start));
+ try {
+ return readData(start, (int) (end - start));
+ } catch (Throwable t) {
+ logger.error("Exception happened while reading data of {}", file, t);
+ throw t;
+ }
}
/** notice, the target bytebuffer are not flipped. */
@@ -1257,15 +1286,16 @@ public long selfCheck(
}
tsFileInput.position(headerLength);
+ boolean isComplete = isComplete();
if (fileSize == headerLength) {
return headerLength;
- } else if (isComplete()) {
+ } else if (isComplete) {
loadMetadataSize();
if (fastFinish) {
return TsFileCheckStatus.COMPLETE_FILE;
}
}
- // not a complete file, we will recover it...
+ // if not a complete file, we will recover it...
long truncatedSize = headerLength;
byte marker;
List<long[]> timeBatch = new ArrayList<>(); // NOTE(review): element type stripped in extraction — confirm against upstream source
@@ -1309,7 +1339,10 @@ public long selfCheck(
while (dataSize > 0) {
// a new Page
PageHeader pageHeader = this.readPageHeader(chunkHeader.getDataType(), true);
- chunkStatistics.mergeStatistics(pageHeader.getStatistics());
+ if (pageHeader.getUncompressedSize() != 0) {
+ // not empty page
+ chunkStatistics.mergeStatistics(pageHeader.getStatistics());
+ }
this.skipPageData(pageHeader);
dataSize -= pageHeader.getSerializedPageSize();
chunkHeader.increasePageNums(1);
@@ -1476,9 +1509,13 @@ public long selfCheck(
// last chunk group Metadata
chunkGroupMetadataList.add(new ChunkGroupMetadata(lastDeviceId, chunkMetadataList));
}
- truncatedSize = this.position() - 1;
+ if (isComplete) {
+ truncatedSize = TsFileCheckStatus.COMPLETE_FILE;
+ } else {
+ truncatedSize = this.position() - 1;
+ }
} catch (Exception e) {
- logger.info(
+ logger.warn(
"TsFile {} self-check cannot proceed at position {} " + "recovered, because : {}",
file,
this.position(),
@@ -1674,7 +1711,7 @@ public List getAlignedChunkMetadata(String device) throws
try {
// next layer MeasurementNode of the specific DeviceNode
metadataIndexNode = MetadataIndexNode.deserializeFrom(buffer);
- } catch (BufferOverflowException e) {
+ } catch (Exception e) {
logger.error(METADATA_INDEX_NODE_DESERIALIZE_ERROR, file);
throw e;
}
@@ -1949,7 +1986,7 @@ private void collectEachLeafMeasurementNodeOffsetRange(
}
collectEachLeafMeasurementNodeOffsetRange(readData(startOffset, endOffset), queue);
}
- } catch (BufferOverflowException e) {
+ } catch (Exception e) {
logger.error(
"Error occurred while collecting offset ranges of measurement nodes of file {}", file);
throw e;
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/BatchData.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/BatchData.java
index 3ff8c5f7ac1b1..7b22ebf7d16f7 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/BatchData.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/BatchData.java
@@ -37,6 +37,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.function.Predicate;
/**
* BatchData is a self-defined data structure which is optimized for different type of
@@ -808,6 +809,11 @@ public boolean hasNext(long minBound, long maxBound) {
return hasNext();
}
+ @Override
+ public boolean hasNext(Predicate<Long> boundPredicate) {
+ return hasNext();
+ }
+
@Override
public void next() {
BatchData.this.next();
@@ -881,6 +887,17 @@ public boolean hasNext(long minBound, long maxBound) {
return BatchData.this.hasCurrent();
}
+ @Override
+ public boolean hasNext(Predicate<Long> boundPredicate) {
+ while (BatchData.this.hasCurrent() && currentValue() == null) {
+ if (boundPredicate.test(currentTime())) {
+ break;
+ }
+ super.next();
+ }
+ return BatchData.this.hasCurrent();
+ }
+
@Override
public Object currentValue() {
TsPrimitiveType v = getVector()[subIndex];
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/IBatchDataIterator.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/IBatchDataIterator.java
index 0761c1d00ce92..ad2ef98765dc3 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/IBatchDataIterator.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/IBatchDataIterator.java
@@ -19,12 +19,24 @@
package org.apache.iotdb.tsfile.read.common;
+import java.util.function.Predicate;
+
public interface IBatchDataIterator {
boolean hasNext();
boolean hasNext(long minBound, long maxBound);
+ /**
+ * Determine whether there is a non-null value in the current time window. This method is used in
+ * GROUP BY aggregation query.
+ *
+ * @param boundPredicate A predicate used to judge whether the current timestamp is out of the
+ * time range, returning true if it is. This predicate guarantees that the current time of
+ * batchData will not be out of the time range when a sensor's values are all null.
+ */
+ boolean hasNext(Predicate<Long> boundPredicate);
+
void next();
long currentTime();
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/utils/MeasurementGroup.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/utils/MeasurementGroup.java
index e4306b7cc835a..21cf4c8b85880 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/utils/MeasurementGroup.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/utils/MeasurementGroup.java
@@ -20,11 +20,12 @@
import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
+import java.io.Serializable;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-public class MeasurementGroup {
+public class MeasurementGroup implements Serializable {
private boolean isAligned;
private Map measurementSchemaMap;
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/TsFileWriter.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/TsFileWriter.java
index f2ced038c92b8..ec2ef2baf572a 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/TsFileWriter.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/TsFileWriter.java
@@ -636,8 +636,4 @@ public void close() throws IOException {
public TsFileIOWriter getIOWriter() {
return this.fileWriter;
}
-
- public void setIsUnseq(boolean unseq) {
- this.isUnseq = unseq;
- }
}
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/AlignedChunkGroupWriterImpl.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/AlignedChunkGroupWriterImpl.java
index 6c69d3aaee1e8..79d9ed271e594 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/AlignedChunkGroupWriterImpl.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/AlignedChunkGroupWriterImpl.java
@@ -37,7 +37,12 @@
import org.slf4j.LoggerFactory;
import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
public class AlignedChunkGroupWriterImpl implements IChunkGroupWriter {
private static final Logger LOG = LoggerFactory.getLogger(AlignedChunkGroupWriterImpl.class);
@@ -150,7 +155,7 @@ public int write(Tablet tablet) throws WriteProcessException, IOException {
// check isNull by bitMap in tablet
if (tablet.bitMaps != null
&& tablet.bitMaps[columnIndex] != null
- && !tablet.bitMaps[columnIndex].isMarked(row)) {
+ && tablet.bitMaps[columnIndex].isMarked(row)) {
isNull = true;
}
ValueChunkWriter valueChunkWriter =
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/NonAlignedChunkGroupWriterImpl.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/NonAlignedChunkGroupWriterImpl.java
index 8b6038e4780a6..f8063271b4a81 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/NonAlignedChunkGroupWriterImpl.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/NonAlignedChunkGroupWriterImpl.java
@@ -94,6 +94,12 @@ public int write(Tablet tablet) throws WriteProcessException {
long time = tablet.timestamps[row];
boolean hasOneColumnWritten = false;
for (int column = 0; column < timeseries.size(); column++) {
+ // check isNull in tablet
+ if (tablet.bitMaps != null
+ && tablet.bitMaps[column] != null
+ && tablet.bitMaps[column].isMarked(row)) {
+ continue;
+ }
String measurementId = timeseries.get(column).getMeasurementId();
checkIsHistoryData(measurementId, time);
hasOneColumnWritten = true;
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/ValueChunkWriter.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/ValueChunkWriter.java
index 9ea33101164d4..3ece00b40304f 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/ValueChunkWriter.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/chunk/ValueChunkWriter.java
@@ -207,13 +207,19 @@ public long estimateMaxSeriesMemSize() {
public long getCurrentChunkSize() {
/**
- * It may happen if pageBuffer stores empty bits and subsequent write operations are all out of
- * order, then count of statistics in this chunk will be 0 and this chunk will not be flushed.
+ * It may happen if subsequent write operations are all out of order, then count of statistics
+ * in this chunk will be 0 and this chunk will not be flushed.
*/
- if (pageBuffer.size() == 0 || statistics.getCount() == 0) {
+ if (pageBuffer.size() == 0) {
return 0;
}
+ // Empty chunk, it may happen if pageBuffer stores empty bits and only chunk header will be
+ // flushed.
+ if (statistics.getCount() == 0) {
+ return ChunkHeader.getSerializedSize(measurementId, pageBuffer.size());
+ }
+
// return the serialized size of the chunk header + all pages
return ChunkHeader.getSerializedSize(measurementId, pageBuffer.size())
+ (long) pageBuffer.size();
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/record/Tablet.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/record/Tablet.java
index 9cfabc1056478..dd919f448cc4b 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/record/Tablet.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/record/Tablet.java
@@ -110,6 +110,13 @@ public void setSchemas(List schemas) {
this.schemas = schemas;
}
+ public void initBitMaps() {
+ this.bitMaps = new BitMap[schemas.size()];
+ for (int column = 0; column < schemas.size(); column++) {
+ this.bitMaps[column] = new BitMap(getMaxRowNumber());
+ }
+ }
+
public void addTimestamp(int rowIndex, long timestamp) {
timestamps[rowIndex] = timestamp;
}
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/RestorableTsFileIOWriter.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/RestorableTsFileIOWriter.java
index 2ea48fafb825f..70a5d8cf9fb72 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/RestorableTsFileIOWriter.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/RestorableTsFileIOWriter.java
@@ -75,42 +75,7 @@ public class RestorableTsFileIOWriter extends TsFileIOWriter {
* @throws IOException if write failed, or the file is broken but autoRepair==false.
*/
public RestorableTsFileIOWriter(File file) throws IOException {
- if (logger.isDebugEnabled()) {
- logger.debug("{} is opened.", file.getName());
- }
- this.file = file;
- this.out = FSFactoryProducer.getFileOutputFactory().getTsFileOutput(file.getPath(), true);
-
- // file doesn't exist
- if (file.length() == 0) {
- startFile();
- crashed = true;
- canWrite = true;
- return;
- }
-
- if (file.exists()) {
- try (TsFileSequenceReader reader = new TsFileSequenceReader(file.getAbsolutePath(), false)) {
-
- truncatedSize = reader.selfCheck(knownSchemas, chunkGroupMetadataList, true);
- minPlanIndex = reader.getMinPlanIndex();
- maxPlanIndex = reader.getMaxPlanIndex();
- if (truncatedSize == TsFileCheckStatus.COMPLETE_FILE) {
- crashed = false;
- canWrite = false;
- out.close();
- } else if (truncatedSize == TsFileCheckStatus.INCOMPATIBLE_FILE) {
- out.close();
- throw new NotCompatibleTsFileException(
- String.format("%s is not in TsFile format.", file.getAbsolutePath()));
- } else {
- crashed = true;
- canWrite = true;
- // remove broken data
- out.truncate(truncatedSize);
- }
- }
- }
+ this(file, true);
}
public RestorableTsFileIOWriter(File file, boolean truncate) throws IOException {
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/TsFileIOWriter.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/TsFileIOWriter.java
index 7eb9a95110778..2f865f297f081 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/TsFileIOWriter.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/TsFileIOWriter.java
@@ -142,14 +142,14 @@ protected void startFile() throws IOException {
out.write(VERSION_NUMBER_BYTE);
}
- public void startChunkGroup(String deviceId) throws IOException {
+ public int startChunkGroup(String deviceId) throws IOException {
this.currentChunkGroupDeviceId = deviceId;
if (logger.isDebugEnabled()) {
logger.debug("start chunk group:{}, file position {}", deviceId, out.getPosition());
}
chunkMetadataList = new ArrayList<>();
ChunkGroupHeader chunkGroupHeader = new ChunkGroupHeader(currentChunkGroupDeviceId);
- chunkGroupHeader.serializeTo(out.wrapAsStream());
+ return chunkGroupHeader.serializeTo(out.wrapAsStream());
}
/**
@@ -458,6 +458,10 @@ public void writePlanIndices() throws IOException {
out.flush();
}
+ public void truncate(long offset) throws IOException {
+ out.truncate(offset);
+ }
+
/**
* this function is only for Test.
*
diff --git a/tsfile/src/test/java/org/apache/iotdb/tsfile/read/TsFileSequenceReaderTest.java b/tsfile/src/test/java/org/apache/iotdb/tsfile/read/TsFileSequenceReaderTest.java
index d54e4be7e0815..a69eec5bfbd22 100644
--- a/tsfile/src/test/java/org/apache/iotdb/tsfile/read/TsFileSequenceReaderTest.java
+++ b/tsfile/src/test/java/org/apache/iotdb/tsfile/read/TsFileSequenceReaderTest.java
@@ -20,19 +20,28 @@
package org.apache.iotdb.tsfile.read;
import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
+import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
+import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
import org.apache.iotdb.tsfile.file.MetaMarker;
import org.apache.iotdb.tsfile.file.header.ChunkGroupHeader;
import org.apache.iotdb.tsfile.file.header.ChunkHeader;
import org.apache.iotdb.tsfile.file.header.PageHeader;
import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.iotdb.tsfile.read.common.Path;
import org.apache.iotdb.tsfile.utils.FileGenerator;
import org.apache.iotdb.tsfile.utils.Pair;
+import org.apache.iotdb.tsfile.utils.TsFileGeneratorUtils;
+import org.apache.iotdb.tsfile.write.TsFileWriter;
+import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
@@ -125,4 +134,42 @@ public void testReadChunkMetadataInDevice() throws IOException {
Assert.assertTrue(reader.readChunkMetadataInDevice("d3").isEmpty());
reader.close();
}
+
+ @Test
+ public void testReadEmptyPageInSelfCheck() throws IOException, WriteProcessException {
+ int oldMaxPagePointNum =
+ TSFileDescriptor.getInstance().getConfig().getMaxNumberOfPointsInPage();
+ TSFileDescriptor.getInstance().getConfig().setMaxNumberOfPointsInPage(10);
+ File testFile = new File(FILE_PATH);
+
+ // create tsfile with empty page
+ try (TsFileWriter tsFileWriter = new TsFileWriter(testFile)) {
+ // register aligned timeseries
+ List<MeasurementSchema> alignedMeasurementSchemas = new ArrayList<>();
+ alignedMeasurementSchemas.add(
+ new MeasurementSchema("s1", TSDataType.INT64, TSEncoding.PLAIN));
+ alignedMeasurementSchemas.add(
+ new MeasurementSchema("s2", TSDataType.INT64, TSEncoding.PLAIN));
+ tsFileWriter.registerAlignedTimeseries(new Path("d1"), alignedMeasurementSchemas);
+
+ List<MeasurementSchema> writeMeasurementScheams = new ArrayList<>();
+ // only write s1
+ writeMeasurementScheams.add(alignedMeasurementSchemas.get(0));
+ TsFileGeneratorUtils.writeWithTsRecord(
+ tsFileWriter, "d1", writeMeasurementScheams, 25, 0, 0, true);
+
+ // write s1 and s2, fill 2 empty pages for s2
+ writeMeasurementScheams.add(alignedMeasurementSchemas.get(1));
+ TsFileGeneratorUtils.writeWithTsRecord(
+ tsFileWriter, "d1", writeMeasurementScheams, 10, 25, 0, true);
+ } finally {
+ TSFileDescriptor.getInstance().getConfig().setMaxNumberOfPointsInPage(oldMaxPagePointNum);
+ }
+
+ // read tsfile with selfCheck method
+ TsFileSequenceReader reader = new TsFileSequenceReader(FILE_PATH);
+ Assert.assertEquals(
+ TsFileCheckStatus.COMPLETE_FILE,
+ reader.selfCheck(new HashMap<>(), new ArrayList<>(), false));
+ }
}
diff --git a/tsfile/src/test/java/org/apache/iotdb/tsfile/write/TsFileWriteApiTest.java b/tsfile/src/test/java/org/apache/iotdb/tsfile/write/TsFileWriteApiTest.java
index 6f5c8eaa98cc7..cafa4a0f63b32 100644
--- a/tsfile/src/test/java/org/apache/iotdb/tsfile/write/TsFileWriteApiTest.java
+++ b/tsfile/src/test/java/org/apache/iotdb/tsfile/write/TsFileWriteApiTest.java
@@ -24,7 +24,9 @@
import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
import org.apache.iotdb.tsfile.fileSystem.FSFactoryProducer;
import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.utils.Binary;
import org.apache.iotdb.tsfile.utils.TsFileGeneratorUtils;
+import org.apache.iotdb.tsfile.write.record.Tablet;
import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
import org.junit.After;
@@ -42,6 +44,9 @@ public class TsFileWriteApiTest {
private final String deviceId = "root.sg.d1";
private final List<MeasurementSchema> alignedMeasurementSchemas = new ArrayList<>();
private final List<MeasurementSchema> measurementSchemas = new ArrayList<>();
+ private int oldChunkGroupSize = TSFileDescriptor.getInstance().getConfig().getGroupSizeInByte();
+ private int oldMaxNumOfPointsInPage =
+ TSFileDescriptor.getInstance().getConfig().getMaxNumberOfPointsInPage();
@Before
public void setUp() {
@@ -53,6 +58,8 @@ public void setUp() {
@After
public void end() {
if (f.exists()) f.delete();
+ TSFileDescriptor.getInstance().getConfig().setMaxNumberOfPointsInPage(oldMaxNumOfPointsInPage);
+ TSFileDescriptor.getInstance().getConfig().setGroupSizeInByte(oldChunkGroupSize);
}
private void setEnv(int chunkGroupSize, int pageSize) {
@@ -324,4 +331,96 @@ public void writeOutOfOrderData() throws IOException, WriteProcessException {
}
}
}
+
+ @Test
+ public void writeNonAlignedWithTabletWithNullValue() {
+ setEnv(100, 30);
+ try (TsFileWriter tsFileWriter = new TsFileWriter(f)) {
+ measurementSchemas.add(new MeasurementSchema("s1", TSDataType.TEXT, TSEncoding.PLAIN));
+ measurementSchemas.add(new MeasurementSchema("s2", TSDataType.TEXT, TSEncoding.PLAIN));
+ measurementSchemas.add(new MeasurementSchema("s3", TSDataType.TEXT, TSEncoding.PLAIN));
+
+ // register nonAligned timeseries
+ tsFileWriter.registerTimeseries(new Path(deviceId), measurementSchemas);
+
+ Tablet tablet = new Tablet(deviceId, measurementSchemas);
+ long[] timestamps = tablet.timestamps;
+ Object[] values = tablet.values;
+ tablet.initBitMaps();
+ long sensorNum = measurementSchemas.size();
+ long startTime = 0;
+ for (long r = 0; r < 10000; r++) {
+ int row = tablet.rowSize++;
+ timestamps[row] = startTime++;
+ for (int i = 0; i < sensorNum; i++) {
+ if (i == 1 && r > 1000) {
+ tablet.bitMaps[i].mark((int) r % tablet.getMaxRowNumber());
+ continue;
+ }
+ Binary[] textSensor = (Binary[]) values[i];
+ textSensor[row] = new Binary("testString.........");
+ }
+ // write
+ if (tablet.rowSize == tablet.getMaxRowNumber()) {
+ tsFileWriter.write(tablet);
+ tablet.reset();
+ }
+ }
+ // write
+ if (tablet.rowSize != 0) {
+ tsFileWriter.write(tablet);
+ tablet.reset();
+ }
+
+ } catch (Throwable e) {
+ e.printStackTrace();
+ Assert.fail("Meet errors in test: " + e.getMessage());
+ }
+ }
+
+ @Test
+ public void writeAlignedWithTabletWithNullValue() {
+ setEnv(100, 30);
+ try (TsFileWriter tsFileWriter = new TsFileWriter(f)) {
+ measurementSchemas.add(new MeasurementSchema("s1", TSDataType.TEXT, TSEncoding.PLAIN));
+ measurementSchemas.add(new MeasurementSchema("s2", TSDataType.TEXT, TSEncoding.PLAIN));
+ measurementSchemas.add(new MeasurementSchema("s3", TSDataType.TEXT, TSEncoding.PLAIN));
+
+ // register aligned timeseries
+ tsFileWriter.registerAlignedTimeseries(new Path(deviceId), measurementSchemas);
+
+ Tablet tablet = new Tablet(deviceId, measurementSchemas);
+ long[] timestamps = tablet.timestamps;
+ Object[] values = tablet.values;
+ tablet.initBitMaps();
+ long sensorNum = measurementSchemas.size();
+ long startTime = 0;
+ for (long r = 0; r < 10000; r++) {
+ int row = tablet.rowSize++;
+ timestamps[row] = startTime++;
+ for (int i = 0; i < sensorNum; i++) {
+ if (i == 1 && r > 1000) {
+ tablet.bitMaps[i].mark((int) r % tablet.getMaxRowNumber());
+ continue;
+ }
+ Binary[] textSensor = (Binary[]) values[i];
+ textSensor[row] = new Binary("testString.........");
+ }
+ // write
+ if (tablet.rowSize == tablet.getMaxRowNumber()) {
+ tsFileWriter.writeAligned(tablet);
+ tablet.reset();
+ }
+ }
+ // write
+ if (tablet.rowSize != 0) {
+ tsFileWriter.writeAligned(tablet);
+ tablet.reset();
+ }
+
+ } catch (Throwable e) {
+ e.printStackTrace();
+ Assert.fail("Meet errors in test: " + e.getMessage());
+ }
+ }
}
diff --git a/zeppelin-interpreter/pom.xml b/zeppelin-interpreter/pom.xml
index c870cd75ae030..432a7c054162b 100644
--- a/zeppelin-interpreter/pom.xml
+++ b/zeppelin-interpreter/pom.xml
@@ -24,7 +24,7 @@
<groupId>org.apache.iotdb</groupId>
<artifactId>iotdb-parent</artifactId>
- <version>0.13.0-SNAPSHOT</version>
+ <version>0.13.1-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<artifactId>zeppelin-iotdb</artifactId>