| hash | date | author | commit_message | is_merge | git_diff | type | masked_commit_message |
|---|---|---|---|---|---|---|---|
1ec595134d91936bf2d985712ba8db9146e394dc
|
2024-04-25 18:00:31
|
Weny Xu
|
feat: define `CreateFlowTask` and `DropFlowTask` (#3801)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 4671cbf69881..6a209e795c8b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3866,7 +3866,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=73ac0207ab71dfea48f30259ffdb611501b5ecb8#73ac0207ab71dfea48f30259ffdb611501b5ecb8"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=783682fabc38c57b5b9d46bdcfeebe2496e85bbb#783682fabc38c57b5b9d46bdcfeebe2496e85bbb"
dependencies = [
"prost 0.12.4",
"serde",
diff --git a/Cargo.toml b/Cargo.toml
index a74099fa4cfc..78b89a5e1707 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -115,7 +115,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "73ac0207ab71dfea48f30259ffdb611501b5ecb8" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "783682fabc38c57b5b9d46bdcfeebe2496e85bbb" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
diff --git a/src/api/src/helper.rs b/src/api/src/helper.rs
index c71bb0795cb3..ec43253f39d8 100644
--- a/src/api/src/helper.rs
+++ b/src/api/src/helper.rs
@@ -518,6 +518,8 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
Some(Expr::Alter(_)) => "ddl.alter",
Some(Expr::DropTable(_)) => "ddl.drop_table",
Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
+ Some(Expr::CreateFlowTask(_)) => "ddl.create_flow_task",
+ Some(Expr::DropFlowTask(_)) => "ddl.drop_flow_task",
None => "ddl.empty",
}
}
diff --git a/src/common/meta/src/rpc/ddl.rs b/src/common/meta/src/rpc/ddl.rs
index f48e2f6486da..a7e14161ecc6 100644
--- a/src/common/meta/src/rpc/ddl.rs
+++ b/src/common/meta/src/rpc/ddl.rs
@@ -18,15 +18,16 @@ use std::result;
use api::v1::meta::ddl_task_request::Task;
use api::v1::meta::{
AlterTableTask as PbAlterTableTask, AlterTableTasks as PbAlterTableTasks,
- CreateDatabaseTask as PbCreateDatabaseTask, CreateTableTask as PbCreateTableTask,
- CreateTableTasks as PbCreateTableTasks, DdlTaskRequest as PbDdlTaskRequest,
- DdlTaskResponse as PbDdlTaskResponse, DropDatabaseTask as PbDropDatabaseTask,
+ CreateDatabaseTask as PbCreateDatabaseTask, CreateFlowTask as PbCreateFlowTask,
+ CreateTableTask as PbCreateTableTask, CreateTableTasks as PbCreateTableTasks,
+ DdlTaskRequest as PbDdlTaskRequest, DdlTaskResponse as PbDdlTaskResponse,
+ DropDatabaseTask as PbDropDatabaseTask, DropFlowTask as PbDropFlowTask,
DropTableTask as PbDropTableTask, DropTableTasks as PbDropTableTasks, Partition, ProcedureId,
TruncateTableTask as PbTruncateTableTask,
};
use api::v1::{
- AlterExpr, CreateDatabaseExpr, CreateTableExpr, DropDatabaseExpr, DropTableExpr,
- TruncateTableExpr,
+ AlterExpr, CreateDatabaseExpr, CreateFlowTaskExpr, CreateTableExpr, DropDatabaseExpr,
+ DropFlowTaskExpr, DropTableExpr, TruncateTableExpr,
};
use base64::engine::general_purpose;
use base64::Engine as _;
@@ -181,6 +182,8 @@ impl TryFrom<Task> for DdlTask {
Task::DropDatabaseTask(drop_database) => {
Ok(DdlTask::DropDatabase(drop_database.try_into()?))
}
+ Task::CreateFlowTask(_) => unimplemented!(),
+ Task::DropFlowTask(_) => unimplemented!(),
}
}
}
@@ -720,6 +723,129 @@ impl TryFrom<DropDatabaseTask> for PbDropDatabaseTask {
}
}
+/// Create flow task
+pub struct CreateFlowTask {
+ pub catalog_name: String,
+ pub task_name: String,
+ pub source_table_names: Vec<TableName>,
+ pub sink_table_name: TableName,
+ pub or_replace: bool,
+ pub create_if_not_exists: bool,
+ pub expire_when: String,
+ pub comment: String,
+ pub sql: String,
+ pub options: HashMap<String, String>,
+}
+
+impl TryFrom<PbCreateFlowTask> for CreateFlowTask {
+ type Error = error::Error;
+
+ fn try_from(pb: PbCreateFlowTask) -> Result<Self> {
+ let CreateFlowTaskExpr {
+ catalog_name,
+ task_name,
+ source_table_names,
+ sink_table_name,
+ or_replace,
+ create_if_not_exists,
+ expire_when,
+ comment,
+ sql,
+ task_options,
+ } = pb.create_flow_task.context(error::InvalidProtoMsgSnafu {
+ err_msg: "expected create_flow_task",
+ })?;
+
+ Ok(CreateFlowTask {
+ catalog_name,
+ task_name,
+ source_table_names: source_table_names.into_iter().map(Into::into).collect(),
+ sink_table_name: sink_table_name
+ .context(error::InvalidProtoMsgSnafu {
+ err_msg: "expected sink_table_name",
+ })?
+ .into(),
+ or_replace,
+ create_if_not_exists,
+ expire_when,
+ comment,
+ sql,
+ options: task_options,
+ })
+ }
+}
+
+impl From<CreateFlowTask> for PbCreateFlowTask {
+ fn from(
+ CreateFlowTask {
+ catalog_name,
+ task_name,
+ source_table_names,
+ sink_table_name,
+ or_replace,
+ create_if_not_exists,
+ expire_when,
+ comment,
+ sql,
+ options,
+ }: CreateFlowTask,
+ ) -> Self {
+ PbCreateFlowTask {
+ create_flow_task: Some(CreateFlowTaskExpr {
+ catalog_name,
+ task_name,
+ source_table_names: source_table_names.into_iter().map(Into::into).collect(),
+ sink_table_name: Some(sink_table_name.into()),
+ or_replace,
+ create_if_not_exists,
+ expire_when,
+ comment,
+ sql,
+ task_options: options,
+ }),
+ }
+ }
+}
+
+/// Drop flow task
+pub struct DropFlowTask {
+ pub catalog_name: String,
+ pub task_name: String,
+}
+
+impl TryFrom<PbDropFlowTask> for DropFlowTask {
+ type Error = error::Error;
+
+ fn try_from(pb: PbDropFlowTask) -> Result<Self> {
+ let DropFlowTaskExpr {
+ catalog_name,
+ task_name,
+ } = pb.drop_flow_task.context(error::InvalidProtoMsgSnafu {
+ err_msg: "expected sink_table_name",
+ })?;
+ Ok(DropFlowTask {
+ catalog_name,
+ task_name,
+ })
+ }
+}
+
+impl From<DropFlowTask> for PbDropFlowTask {
+ fn from(
+ DropFlowTask {
+ catalog_name,
+ task_name,
+ }: DropFlowTask,
+ ) -> Self {
+ PbDropFlowTask {
+ drop_flow_task: Some(DropFlowTaskExpr {
+ catalog_name,
+ task_name,
+ }),
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
use std::sync::Arc;
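
The new task types follow the conversion pattern used elsewhere in `rpc/ddl.rs`: an infallible `From` into the protobuf message and a fallible `TryFrom` back that rejects messages missing required optional fields. Below is a minimal sketch of that round trip; `FlowTask`, `PbFlowTask`, and the plain `String` error are simplified stand-ins for illustration, not the real greptime-proto types.

```rust
// Minimal sketch of the domain <-> protobuf conversion pattern used above.
// `FlowTask` / `PbFlowTask` are simplified stand-ins, not the real types.

#[derive(Debug, Clone, PartialEq)]
struct FlowTask {
    catalog_name: String,
    task_name: String,
    sink_table_name: String,
}

#[derive(Debug, Clone)]
struct PbFlowTask {
    catalog_name: String,
    task_name: String,
    // Protobuf message fields are optional, so the decoded value may be absent.
    sink_table_name: Option<String>,
}

impl From<FlowTask> for PbFlowTask {
    fn from(task: FlowTask) -> Self {
        PbFlowTask {
            catalog_name: task.catalog_name,
            task_name: task.task_name,
            sink_table_name: Some(task.sink_table_name),
        }
    }
}

impl TryFrom<PbFlowTask> for FlowTask {
    type Error = String;

    fn try_from(pb: PbFlowTask) -> Result<Self, Self::Error> {
        Ok(FlowTask {
            catalog_name: pb.catalog_name,
            task_name: pb.task_name,
            // Reject messages that omit the required field, analogous to the
            // `InvalidProtoMsgSnafu` context in the real code.
            sink_table_name: pb
                .sink_table_name
                .ok_or_else(|| "expected sink_table_name".to_string())?,
        })
    }
}

fn main() {
    let task = FlowTask {
        catalog_name: "greptime".into(),
        task_name: "my_flow".into(),
        sink_table_name: "sink".into(),
    };
    let pb: PbFlowTask = task.clone().into();
    let back = FlowTask::try_from(pb).unwrap();
    assert_eq!(task, back);
}
```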
diff --git a/src/common/meta/src/table_name.rs b/src/common/meta/src/table_name.rs
index 62615e5c211b..645e6386df02 100644
--- a/src/common/meta/src/table_name.rs
+++ b/src/common/meta/src/table_name.rs
@@ -14,7 +14,7 @@
use std::fmt::{Display, Formatter};
-use api::v1::meta::TableName as PbTableName;
+use api::v1::TableName as PbTableName;
use serde::{Deserialize, Serialize};
use table::table_reference::TableReference;
diff --git a/src/frontend/src/instance/grpc.rs b/src/frontend/src/instance/grpc.rs
index 73ec35df5d49..551a7da85d31 100644
--- a/src/frontend/src/instance/grpc.rs
+++ b/src/frontend/src/instance/grpc.rs
@@ -109,7 +109,6 @@ impl GrpcQueryHandler for Instance {
match expr {
DdlExpr::CreateTable(mut expr) => {
- // TODO(weny): supports to create multiple region table.
let _ = self
.statement_executor
.create_table_inner(&mut expr, None, &ctx)
@@ -138,6 +137,12 @@ impl GrpcQueryHandler for Instance {
TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
self.statement_executor.truncate_table(table_name).await?
}
+ DdlExpr::CreateFlowTask(_) => {
+ unimplemented!()
+ }
+ DdlExpr::DropFlowTask(_) => {
+ unimplemented!()
+ }
}
}
};
@@ -176,6 +181,16 @@ fn fill_catalog_and_schema_from_context(ddl_expr: &mut DdlExpr, ctx: &QueryConte
Expr::TruncateTable(expr) => {
check_and_fill!(expr);
}
+ Expr::CreateFlowTask(expr) => {
+ if expr.catalog_name.is_empty() {
+ expr.catalog_name = catalog.to_string();
+ }
+ }
+ Expr::DropFlowTask(expr) => {
+ if expr.catalog_name.is_empty() {
+ expr.catalog_name = catalog.to_string();
+ }
+ }
}
}
|
feat
|
define `CreateFlowTask` and `DropFlowTask` (#3801)
|
bee8323bae2a614035dd3687b78b32c3e46512e8
|
2023-07-04 17:19:12
|
Ruihang Xia
|
chore: bump sqlness to 0.5.0 (#1877)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index cf6085bdd8eb..b491895cf5e3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8940,8 +8940,9 @@ dependencies = [
[[package]]
name = "sqlness"
-version = "0.4.3"
-source = "git+https://github.com/CeresDB/sqlness.git?rev=a4663365795d2067eb53966c383e1bb0c89c7627#a4663365795d2067eb53966c383e1bb0c89c7627"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0860f149718809371602b42573693e1ed2b1d0aed35fe69e04e4e4e9918d81f7"
dependencies = [
"async-trait",
"derive_builder 0.11.2",
diff --git a/tests/cases/distributed/alter/rename_table.result b/tests/cases/distributed/alter/rename_table.result
index dbc77584e6a5..6e703c10102a 100644
--- a/tests/cases/distributed/alter/rename_table.result
+++ b/tests/cases/distributed/alter/rename_table.result
@@ -25,6 +25,7 @@ SELECT * from t;
| | 4 |
+---+---+
+-- TODO(LFC): Port test cases from standalone env when distributed rename table is implemented (#723).
ALTER TABLE t RENAME new_table;
Affected Rows: 0
@@ -33,6 +34,7 @@ DROP TABLE t;
Error: 4001(TableNotFound), Table not found: greptime.public.t
+-- TODO: this clause should succeed
-- SQLNESS REPLACE details.*
DROP TABLE new_table;
diff --git a/tests/cases/distributed/optimizer/filter_push_down.result b/tests/cases/distributed/optimizer/filter_push_down.result
index 6859a0b7ed7a..4c0635372314 100644
--- a/tests/cases/distributed/optimizer/filter_push_down.result
+++ b/tests/cases/distributed/optimizer/filter_push_down.result
@@ -180,6 +180,16 @@ SELECT i FROM (SELECT * FROM integers i1 UNION SELECT * FROM integers i2) a WHER
| 3 |
+---+
+-- TODO(LFC): Somehow the following SQL does not order by column 1 under new DataFusion occasionally. Should further investigate it. Comment it out temporarily.
+-- expected:
+-- +---+---+--------------+
+-- | a | b | ROW_NUMBER() |
+-- +---+---+--------------+
+-- | 1 | 1 | 1 |
+-- | 2 | 2 | 5 |
+-- | 3 | 3 | 9 |
+-- +---+---+--------------+
+-- SELECT * FROM (SELECT i1.i AS a, i2.i AS b, row_number() OVER (ORDER BY i1.i, i2.i) FROM integers i1, integers i2 WHERE i1.i IS NOT NULL AND i2.i IS NOT NULL) a1 WHERE a=b ORDER BY 1;
SELECT * FROM (SELECT 0=1 AS cond FROM integers i1, integers i2) a1 WHERE cond ORDER BY 1;
++
diff --git a/tests/cases/distributed/tql-explain-analyze/analyze.result b/tests/cases/distributed/tql-explain-analyze/analyze.result
index 4087943ce814..1cd0c5c988ad 100644
--- a/tests/cases/distributed/tql-explain-analyze/analyze.result
+++ b/tests/cases/distributed/tql-explain-analyze/analyze.result
@@ -2,10 +2,12 @@ CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY);
Affected Rows: 0
+-- insert two points at 1ms and one point at 2ms
INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
Affected Rows: 3
+-- analyze at 0s, 5s and 10s. No point at 0s.
-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (-+) -
diff --git a/tests/cases/distributed/tql-explain-analyze/explain.result b/tests/cases/distributed/tql-explain-analyze/explain.result
index cac729473bf2..2be8b54bfa35 100644
--- a/tests/cases/distributed/tql-explain-analyze/explain.result
+++ b/tests/cases/distributed/tql-explain-analyze/explain.result
@@ -2,10 +2,12 @@ CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY);
Affected Rows: 0
+-- insert two points at 1ms and one point at 2ms
INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
Affected Rows: 3
+-- explain at 0s, 5s and 10s. No point at 0s.
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (peer-.*) REDACTED
TQL EXPLAIN (0, 10, '5s') test;
diff --git a/tests/cases/standalone/common/aggregate/distinct_order_by.result b/tests/cases/standalone/common/aggregate/distinct_order_by.result
index bacfd3badb12..81649b776a58 100644
--- a/tests/cases/standalone/common/aggregate/distinct_order_by.result
+++ b/tests/cases/standalone/common/aggregate/distinct_order_by.result
@@ -15,6 +15,14 @@ SELECT DISTINCT i%2 FROM integers ORDER BY 1;
| 1 |
+-----------------------+
+-- TODO(LFC): Failed to run under new DataFusion
+-- expected:
+-- +-----------------------+
+-- | integers.i % Int64(2) |
+-- +-----------------------+
+-- | 1 |
+-- | 0 |
+-- +-----------------------+
SELECT DISTINCT i % 2 FROM integers WHERE i<3 ORDER BY i;
Error: 3000(PlanQuery), Error during planning: For SELECT DISTINCT, ORDER BY expressions i must appear in select list
diff --git a/tests/cases/standalone/common/insert/insert.result b/tests/cases/standalone/common/insert/insert.result
index 0de3fcda45b3..45f6a9137b46 100644
--- a/tests/cases/standalone/common/insert/insert.result
+++ b/tests/cases/standalone/common/insert/insert.result
@@ -21,6 +21,7 @@ SELECT * FROM integers;
| 1970-01-01T00:00:00.005 |
+-------------------------+
+-- Test insert with long string constant
CREATE TABLE IF NOT EXISTS presentations (
presentation_date TIMESTAMP,
author VARCHAR NOT NULL,
diff --git a/tests/cases/standalone/common/order/nulls_first.result b/tests/cases/standalone/common/order/nulls_first.result
index 8e0d8a733f6f..1bab6062f919 100644
--- a/tests/cases/standalone/common/order/nulls_first.result
+++ b/tests/cases/standalone/common/order/nulls_first.result
@@ -36,6 +36,10 @@ SELECT * FROM test ORDER BY i NULLS LAST, j NULLS FIRST;
| | 1 | 2 |
+---+---+---+
+-- TODO(ruihang): The following two SQL statements will fail under distributed mode with error
+-- Error: 1003(Internal), status: Internal, message: "Failed to collect recordbatch, source: Failed to poll stream, source: Arrow error: Invalid argument error: batches[0] schema is different with argument schema.\n batches[0] schema: Schema { fields: [Field { name: \"i\", data_type: Int32, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: \"j\", data_type: Int32, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: \"t\", data_type: Int64, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {\"greptime:time_index\": \"true\"} }], metadata: {\"greptime:version\": \"0\"} },\n argument schema: Schema { fields: [Field { name: \"i\", data_type: Int32, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: \"j\", data_type: Int32, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: \"t\", data_type: Int64, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {\"greptime:time_index\": \"true\"} }], metadata: {} }\n ", details: [], metadata: MetadataMap { headers: {"inner_error_code": "Internal"} }
+-- SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS FIRST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
+-- SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS LAST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
SELECT * FROM test ORDER BY i NULLS FIRST, j NULLS LAST LIMIT 2;
+---+---+---+
diff --git a/tests/cases/standalone/common/order/order_by.result b/tests/cases/standalone/common/order/order_by.result
index 640c593ac535..18210bfc53ba 100644
--- a/tests/cases/standalone/common/order/order_by.result
+++ b/tests/cases/standalone/common/order/order_by.result
@@ -192,10 +192,13 @@ SELECT a-10 AS k FROM test UNION SELECT a-10 AS l FROM test ORDER BY k;
| 3 |
+---+
+-- ORDER BY on alias in right-most query
+-- CONTROVERSIAL: SQLite allows both "k" and "l" to be referenced here, Postgres and MonetDB give an error.
SELECT a-10 AS k FROM test UNION SELECT a-10 AS l FROM test ORDER BY l;
Error: 3000(PlanQuery), No field named l. Valid fields are k.
+-- Not compatible with duckdb, works in greptimedb
SELECT a-10 AS k FROM test UNION SELECT a-10 AS l FROM test ORDER BY 1-k;
+---+
@@ -206,10 +209,18 @@ SELECT a-10 AS k FROM test UNION SELECT a-10 AS l FROM test ORDER BY 1-k;
| 1 |
+---+
+-- Not compatible with duckdb, gives an error in greptimedb
+-- TODO(LFC): Failed to meet the expected error:
+-- expected:
+-- Error: 3000(PlanQuery), Schema error: No field named 'a'. Valid fields are 'k'.
SELECT a-10 AS k FROM test UNION SELECT a-10 AS l FROM test ORDER BY a-10;
Error: 3000(PlanQuery), Error during planning: For SELECT DISTINCT, ORDER BY expressions a must appear in select list
+-- Not compatible with duckdb, gives an error in greptimedb
+-- TODO(LFC): Failed to meet the expected error:
+-- expected:
+-- Error: 3000(PlanQuery), Schema error: No field named 'a'. Valid fields are 'k'.
SELECT a-10 AS k FROM test UNION SELECT a-11 AS l FROM test ORDER BY a-11;
Error: 3000(PlanQuery), Error during planning: For SELECT DISTINCT, ORDER BY expressions a must appear in select list
diff --git a/tests/cases/standalone/common/order/order_by_exceptions.result b/tests/cases/standalone/common/order/order_by_exceptions.result
index f14bb99c0151..d75f2c043742 100644
--- a/tests/cases/standalone/common/order/order_by_exceptions.result
+++ b/tests/cases/standalone/common/order/order_by_exceptions.result
@@ -10,10 +10,12 @@ SELECT a FROM test ORDER BY 2;
Error: 3000(PlanQuery), Error during planning: Order by column out of bounds, specified: 2, max: 1
+-- Does not work in greptimedb
SELECT a FROM test ORDER BY 'hello', a;
Error: 1003(Internal), Error during planning: Sort operation is not applicable to scalar value hello
+-- Ambiguous reference in union alias, gives an error in duckdb, but works in greptimedb
SELECT a AS k, b FROM test UNION SELECT a, b AS k FROM test ORDER BY k;
+----+----+
@@ -38,6 +40,10 @@ SELECT a % 2, b FROM test UNION SELECT b, a % 2 AS k ORDER BY a % 2;
Error: 3000(PlanQuery), No field named b.
+-- Works in duckdb, but does not work in greptimedb
+-- TODO(LFC): Failed to meet the expected error:
+-- expected:
+-- Error: 3000(PlanQuery), Schema error: No field named 'a'. Valid fields are 'test.a % Int64(2)', 'b'.
SELECT a % 2, b FROM test UNION SELECT a % 2 AS k, b FROM test ORDER BY a % 2;
Error: 3000(PlanQuery), Error during planning: For SELECT DISTINCT, ORDER BY expressions a must appear in select list
diff --git a/tests/cases/standalone/common/tql/aggr_over_time.result b/tests/cases/standalone/common/tql/aggr_over_time.result
index 1608262bffcd..5b73fc5445c5 100644
--- a/tests/cases/standalone/common/tql/aggr_over_time.result
+++ b/tests/cases/standalone/common/tql/aggr_over_time.result
@@ -1,3 +1,7 @@
+-- Port from functions.test L607 - L630, commit 001ee2620e094970e5657ce39275b2fccdbd1359
+-- Include stddev/stdvar over time
+-- load 10s
+-- metric 0 8 8 2 3
create table metric (ts timestamp(3) time index, val double);
Affected Rows: 0
@@ -23,6 +27,8 @@ select * from metric;
| 1970-01-01T00:00:40 | 3.0 |
+---------------------+-----+
+-- eval instant at 1m stdvar_over_time(metric[1m])
+-- {} 10.56
tql eval (60, 61, '10s') stdvar_over_time(metric[1m]);
+---------------------+-------------------------------------+
@@ -31,6 +37,8 @@ tql eval (60, 61, '10s') stdvar_over_time(metric[1m]);
| 1970-01-01T00:01:00 | 10.559999999999999 |
+---------------------+-------------------------------------+
+-- eval instant at 1m stddev_over_time(metric[1m])
+-- {} 3.249615
tql eval (60, 60, '1s') stddev_over_time(metric[1m]);
+---------------------+-------------------------------------+
@@ -39,6 +47,8 @@ tql eval (60, 60, '1s') stddev_over_time(metric[1m]);
| 1970-01-01T00:01:00 | 3.249615361854384 |
+---------------------+-------------------------------------+
+-- eval instant at 1m stddev_over_time((metric[1m]))
+-- {} 3.249615
tql eval (60, 60, '1s') stddev_over_time((metric[1m]));
+---------------------+-------------------------------------+
@@ -51,6 +61,8 @@ drop table metric;
Affected Rows: 1
+-- load 10s
+-- metric 1.5990505637277868 1.5990505637277868 1.5990505637277868
create table metric (ts timestamp(3) time index, val double);
Affected Rows: 0
@@ -63,6 +75,8 @@ insert into metric values
Affected Rows: 4
+-- eval instant at 1m stdvar_over_time(metric[1m])
+-- {} 0
tql eval (60, 60, '1s') stdvar_over_time(metric[1m]);
+---------------------+-------------------------------------+
@@ -71,6 +85,8 @@ tql eval (60, 60, '1s') stdvar_over_time(metric[1m]);
| 1970-01-01T00:01:00 | 0.47943050725465364 |
+---------------------+-------------------------------------+
+-- eval instant at 1m stddev_over_time(metric[1m])
+-- {} 0
tql eval (60, 60, '1s') stddev_over_time(metric[1m]);
+---------------------+-------------------------------------+
@@ -83,6 +99,12 @@ drop table metric;
Affected Rows: 1
+-- Port from functions.test L632 - L680, commit 001ee2620e094970e5657ce39275b2fccdbd1359
+-- Include quantile over time
+-- load 10s
+-- data{test="two samples"} 0 1
+-- data{test="three samples"} 0 1 2
+-- data{test="uneven samples"} 0 1 4
create table data (ts timestamp(3) time index, val double, test string primary key);
Affected Rows: 0
@@ -99,10 +121,58 @@ insert into data values
Affected Rows: 8
+-- eval instant at 1m quantile_over_time(0, data[1m])
+-- {test="two samples"} 0
+-- {test="three samples"} 0
+-- {test="uneven samples"} 0
+-- tql eval (60, 60, '1s') quantile_over_time(0, data[1m]);
+-- eval instant at 1m quantile_over_time(0.5, data[1m])
+-- {test="two samples"} 0.5
+-- {test="three samples"} 1
+-- {test="uneven samples"} 1
+-- tql eval (60, 60, '1s') quantile_over_time(0.5, data[1m]);
+-- eval instant at 1m quantile_over_time(0.75, data[1m])
+-- {test="two samples"} 0.75
+-- {test="three samples"} 1.5
+-- {test="uneven samples"} 2.5
+-- tql eval (60, 60, '1s') quantile_over_time(0.75, data[1m]);
+-- eval instant at 1m quantile_over_time(0.8, data[1m])
+-- {test="two samples"} 0.8
+-- {test="three samples"} 1.6
+-- {test="uneven samples"} 2.8
+-- tql eval (60, 60, '1s') quantile_over_time(0.8, data[1m]);
+-- eval instant at 1m quantile_over_time(1, data[1m])
+-- {test="two samples"} 1
+-- {test="three samples"} 2
+-- {test="uneven samples"} 4
+-- tql eval (60, 60, '1s') quantile_over_time(1, data[1m]);
+-- eval instant at 1m quantile_over_time(-1, data[1m])
+-- {test="two samples"} -Inf
+-- {test="three samples"} -Inf
+-- {test="uneven samples"} -Inf
+-- tql eval (60, 60, '1s') quantile_over_time(-1, data[1m]);
+-- eval instant at 1m quantile_over_time(2, data[1m])
+-- {test="two samples"} +Inf
+-- {test="three samples"} +Inf
+-- {test="uneven samples"} +Inf
+-- tql eval (60, 60, '1s') quantile_over_time(2, data[1m]);
+-- eval instant at 1m (quantile_over_time(2, (data[1m])))
+-- {test="two samples"} +Inf
+-- {test="three samples"} +Inf
+-- {test="uneven samples"} +Inf
+-- tql eval (60, 60, '1s') (quantile_over_time(2, (data[1m])));
drop table data;
Affected Rows: 1
+-- Port from functions.test L773 - L802, commit 001ee2620e094970e5657ce39275b2fccdbd1359
+-- Include max/min/last over time
+-- load 10s
+-- data{type="numbers"} 2 0 3
+-- data{type="some_nan"} 2 0 NaN
+-- data{type="some_nan2"} 2 NaN 1
+-- data{type="some_nan3"} NaN 0 1
+-- data{type="only_nan"} NaN NaN NaN
create table data (ts timestamp(3) time index, val double, ty string primary key);
Affected Rows: 0
@@ -126,6 +196,27 @@ insert into data values
Affected Rows: 15
+-- eval instant at 1m min_over_time(data[1m])
+-- {type="numbers"} 0
+-- {type="some_nan"} 0
+-- {type="some_nan2"} 1
+-- {type="some_nan3"} 0
+-- {type="only_nan"} NaN
+-- tql eval (60, 60, '1s') min_over_time(data[1m]);
+-- eval instant at 1m max_over_time(data[1m])
+-- {type="numbers"} 3
+-- {type="some_nan"} 2
+-- {type="some_nan2"} 2
+-- {type="some_nan3"} 1
+-- {type="only_nan"} NaN
+-- tql eval (60, 60, '1s') max_over_time(data[1m]);
+-- eval instant at 1m last_over_time(data[1m])
+-- data{type="numbers"} 3
+-- data{type="some_nan"} NaN
+-- data{type="some_nan2"} 1
+-- data{type="some_nan3"} 1
+-- data{type="only_nan"} NaN
+-- tql eval (60, 60, '1s') last_over_time(data[1m]);
drop table data;
Affected Rows: 1
diff --git a/tests/cases/standalone/common/tql/basic.result b/tests/cases/standalone/common/tql/basic.result
index 09ce38cba53d..46f600f9ce4e 100644
--- a/tests/cases/standalone/common/tql/basic.result
+++ b/tests/cases/standalone/common/tql/basic.result
@@ -2,11 +2,13 @@ CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY);
Affected Rows: 0
+-- insert two points at 1ms and one point at 2ms
INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
Affected Rows: 3
-- SQLNESS SORT_RESULT 2 1
+-- evaluate at 0s, 5s and 10s. No point at 0s.
TQL EVAL (0, 10, '5s') test;
+-----+---------------------+---+
@@ -18,6 +20,7 @@ TQL EVAL (0, 10, '5s') test;
| 2.0 | 1970-01-01T00:00:10 | a |
+-----+---------------------+---+
+-- the point at 1ms will be shadowed by the point at 2ms
TQL EVAL (0, 10, '5s') test{k="a"};
+-----+---------------------+---+
diff --git a/tests/cases/standalone/common/tql/operator.result b/tests/cases/standalone/common/tql/operator.result
index 360d2bc48a68..1ad0461978c4 100644
--- a/tests/cases/standalone/common/tql/operator.result
+++ b/tests/cases/standalone/common/tql/operator.result
@@ -1,3 +1,9 @@
+-- Port from operators.test L607 - L630, commit 001ee2620e094970e5657ce39275b2fccdbd1359
+-- Include atan2
+-- load 5m
+-- trigy{} 10
+-- trigx{} 20
+-- trigNaN{} NaN
create table trigy (ts timestamp(3) time index, val double);
Affected Rows: 0
@@ -22,6 +28,8 @@ insert into trignan values (0, 'NaN'::double);
Affected Rows: 1
+-- eval instant at 5m trigy atan2 trigx
+-- trigy{} 0.4636476090008061
tql eval (300, 300, '1s') trigy atan2 trigx;
+---------------------+----------------------------+
@@ -30,11 +38,16 @@ tql eval (300, 300, '1s') trigy atan2 trigx;
| 1970-01-01T00:05:00 | 0.4636476090008061 |
+---------------------+----------------------------+
+-- eval instant at 5m trigy atan2 trigNaN
+-- trigy{} NaN
+-- This query doesn't have a result because `trignan` is NaN and will be filtered out.
tql eval (300, 300, '1s') trigy atan2 trignan;
++
++
+-- eval instant at 5m 10 atan2 20
+-- 0.4636476090008061
tql eval (300, 300, '1s') 10 atan2 20;
+---------------------+--------------------+
@@ -43,6 +56,8 @@ tql eval (300, 300, '1s') 10 atan2 20;
| 1970-01-01T00:05:00 | 0.4636476090008061 |
+---------------------+--------------------+
+-- eval instant at 5m 10 atan2 NaN
+-- NaN
tql eval (300, 300, '1s') 10 atan2 NaN;
+---------------------+-------+
diff --git a/tests/cases/standalone/common/types/blob.result b/tests/cases/standalone/common/types/blob.result
index a35e89bc112b..1673d99fe228 100644
--- a/tests/cases/standalone/common/types/blob.result
+++ b/tests/cases/standalone/common/types/blob.result
@@ -2,6 +2,7 @@ CREATE TABLE blobs (b BYTEA, t timestamp time index);
Affected Rows: 0
+--Insert valid hex strings--
INSERT INTO blobs VALUES('\xaa\xff\xaa'::BYTEA, 1), ('\xAA\xFF\xAA\xAA\xFF\xAA'::BYTEA, 2), ('\xAA\xFF\xAA\xAA\xFF\xAA\xAA\xFF\xAA'::BYTEA, 3);
Affected Rows: 3
@@ -16,6 +17,7 @@ SELECT * FROM blobs;
| 5c7841415c7846465c7841415c7841415c7846465c7841415c7841415c7846465c784141 | 1970-01-01T00:00:00.003 |
+--------------------------------------------------------------------------+-------------------------+
+--Insert valid hex strings, lower case--
DELETE FROM blobs;
Affected Rows: 3
@@ -34,6 +36,7 @@ SELECT * FROM blobs;
| 5c7861615c7866665c7861615c7861615c7866665c7861615c7861615c7866665c786161 | 1970-01-01T00:00:00.003 |
+--------------------------------------------------------------------------+-------------------------+
+--Insert valid hex strings with number and letters--
DELETE FROM blobs;
Affected Rows: 3
@@ -52,10 +55,12 @@ SELECT * FROM blobs;
| 5c78616131313939616131313939616131313939 | 1970-01-01T00:00:00.003 |
+------------------------------------------+-------------------------+
+--Insert invalid hex strings (invalid hex chars: G, H, I)--
INSERT INTO blobs VALUES('\xGA\xFF\xAA'::BYTEA, 4);
Affected Rows: 1
+--Insert invalid hex strings (odd # of chars)--
INSERT INTO blobs VALUES('\xA'::BYTEA, 4);
Affected Rows: 1
diff --git a/tests/cases/standalone/cte/cte.result b/tests/cases/standalone/cte/cte.result
index 5dedec787e69..562883a9ef6f 100644
--- a/tests/cases/standalone/cte/cte.result
+++ b/tests/cases/standalone/cte/cte.result
@@ -59,6 +59,7 @@ with cte1 as (select 42), cte1 as (select 42) select * FROM cte1;
Error: 3000(PlanQuery), sql parser error: WITH query name "cte1" specified more than once
+-- reference to CTE before it's actually defined; this is not supported by datafusion
with cte3 as (select ref2.j as i from cte1 as ref2), cte1 as (Select i as j from a), cte2 as (select ref.j+1 as k from cte1 as ref) select * from cte2 union all select * FROM cte3;
Error: 3000(PlanQuery), Error during planning: Table not found: greptime.public.cte1
@@ -96,6 +97,7 @@ SELECT 1 UNION ALL (WITH cte AS (SELECT 42) SELECT * FROM cte) order by 1;
| 42 |
+----------+
+-- Recursive CTEs are not supported in datafusion
WITH RECURSIVE cte(d) AS (
SELECT 1
UNION ALL
@@ -109,6 +111,7 @@ SELECT max(d) FROM cte;
Error: 3000(PlanQuery), This feature is not implemented: Recursive CTEs are not supported
+-- Nested aliases is not supported in datafusion
with cte (a) as (
select 1
)
diff --git a/tests/cases/standalone/cte/cte_in_cte.result b/tests/cases/standalone/cte/cte_in_cte.result
index d9b18ebe2e67..b20ba4b40c63 100644
--- a/tests/cases/standalone/cte/cte_in_cte.result
+++ b/tests/cases/standalone/cte/cte_in_cte.result
@@ -50,14 +50,19 @@ with cte1 as (Select i as j from a) select * from (with cte2 as (select max(j) a
| 42 |
+----+
+-- Refer to CTE in subquery expression,
+-- this feature is not implemented in datafusion
with cte1 as (Select i as j from a) select * from cte1 where j = (with cte2 as (select max(j) as j from cte1) select j from cte2);
Error: 3001(EngineExecuteQuery), This feature is not implemented: Physical plan does not support logical expression (<subquery>)
+-- Refer to same-named CTE in a subquery expression
+-- this feature is not implemented in datafusion
with cte as (Select i as j from a) select * from cte where j = (with cte as (select max(j) as j from cte) select j from cte);
Error: 3000(PlanQuery), sql parser error: WITH query name "cte" specified more than once
+-- self-reference to a non-existent cte
with cte as (select * from cte) select * from cte;
Error: 3000(PlanQuery), Error during planning: Table not found: greptime.public.cte
diff --git a/tests/cases/standalone/optimizer/filter_push_down.result b/tests/cases/standalone/optimizer/filter_push_down.result
index e48471107f44..85ded032de15 100644
--- a/tests/cases/standalone/optimizer/filter_push_down.result
+++ b/tests/cases/standalone/optimizer/filter_push_down.result
@@ -187,6 +187,16 @@ SELECT i FROM (SELECT * FROM integers i1 UNION SELECT * FROM integers i2) a WHER
| 3 |
+---+
+-- TODO(LFC): Somehow the following SQL does not order by column 1 under new DataFusion occasionally. Should further investigate it. Comment it out temporarily.
+-- expected:
+-- +---+---+--------------+
+-- | a | b | ROW_NUMBER() |
+-- +---+---+--------------+
+-- | 1 | 1 | 1 |
+-- | 2 | 2 | 5 |
+-- | 3 | 3 | 9 |
+-- +---+---+--------------+
+-- SELECT * FROM (SELECT i1.i AS a, i2.i AS b, row_number() OVER (ORDER BY i1.i, i2.i) FROM integers i1, integers i2 WHERE i1.i IS NOT NULL AND i2.i IS NOT NULL) a1 WHERE a=b ORDER BY 1;
SELECT * FROM (SELECT 0=1 AS cond FROM integers i1, integers i2) a1 WHERE cond ORDER BY 1;
++
diff --git a/tests/cases/standalone/tql-explain-analyze/analyze.result b/tests/cases/standalone/tql-explain-analyze/analyze.result
index d8e767d84b82..db3dcb265db6 100644
--- a/tests/cases/standalone/tql-explain-analyze/analyze.result
+++ b/tests/cases/standalone/tql-explain-analyze/analyze.result
@@ -2,10 +2,12 @@ CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY);
Affected Rows: 0
+-- insert two points at 1ms and one point at 2ms
INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
Affected Rows: 3
+-- analyze at 0s, 5s and 10s. No point at 0s.
-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (-+) -
diff --git a/tests/cases/standalone/tql-explain-analyze/explain.result b/tests/cases/standalone/tql-explain-analyze/explain.result
index 8a1c23626b4b..a9f501ff783e 100644
--- a/tests/cases/standalone/tql-explain-analyze/explain.result
+++ b/tests/cases/standalone/tql-explain-analyze/explain.result
@@ -2,10 +2,12 @@ CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY);
Affected Rows: 0
+-- insert two points at 1ms and one point at 2ms
INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
Affected Rows: 3
+-- explain at 0s, 5s and 10s. No point at 0s.
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
TQL EXPLAIN (0, 10, '5s') test;
diff --git a/tests/runner/Cargo.toml b/tests/runner/Cargo.toml
index f9e0bdd032f0..d14566f6f048 100644
--- a/tests/runner/Cargo.toml
+++ b/tests/runner/Cargo.toml
@@ -13,6 +13,6 @@ common-grpc = { path = "../../src/common/grpc" }
common-query = { path = "../../src/common/query" }
common-time = { path = "../../src/common/time" }
serde.workspace = true
-sqlness = { git = "https://github.com/CeresDB/sqlness.git", rev = "a4663365795d2067eb53966c383e1bb0c89c7627" }
+sqlness = { version = "0.5" }
tinytemplate = "1.2"
tokio.workspace = true
|
chore
|
bump sqlness to 0.5.0 (#1877)
|
19d2d77b41e0a1d89bf2851d9f42c04207409942
|
2023-08-24 09:22:15
|
Lei, HUANG
|
fix: parse large timestamp (#2185)
| false
|
diff --git a/src/common/time/src/timestamp.rs b/src/common/time/src/timestamp.rs
index a7fd00a2da8c..2a066e9906f0 100644
--- a/src/common/time/src/timestamp.rs
+++ b/src/common/time/src/timestamp.rs
@@ -29,6 +29,16 @@ use crate::error::{ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, Timestam
use crate::timezone::TimeZone;
use crate::util::{div_ceil, format_utc_datetime, local_datetime_to_utc};
+/// Timestamp represents the value of units(seconds/milliseconds/microseconds/nanoseconds) elapsed
+/// since UNIX epoch. The valid value range of [Timestamp] depends on its unit (all in UTC time zone):
+/// - for [TimeUnit::Second]: [-262144-01-01 00:00:00, +262143-12-31 23:59:59]
+/// - for [TimeUnit::Millisecond]: [-262144-01-01 00:00:00.000, +262143-12-31 23:59:59.999]
+/// - for [TimeUnit::Microsecond]: [-262144-01-01 00:00:00.000000, +262143-12-31 23:59:59.999999]
+/// - for [TimeUnit::Nanosecond]: [1677-09-21 00:12:43.145225, 2262-04-11 23:47:16.854775807]
+///
+/// # Note:
+/// Values out of range can still be stored, but performing arithmetic or formatting
+/// operations on them will return an error or simply overflow.
#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
pub struct Timestamp {
value: i64,
@@ -169,6 +179,28 @@ impl Timestamp {
(sec_div, nsec)
}
+ /// Creates a new Timestamp instance from seconds and nanoseconds parts.
+ /// Returns None if overflow.
+ fn from_splits(sec: i64, nsec: u32) -> Option<Self> {
+ if nsec == 0 {
+ Some(Timestamp::new_second(sec))
+ } else if nsec % 1_000_000 == 0 {
+ let millis = nsec / 1_000_000;
+ sec.checked_mul(1000)
+ .and_then(|v| v.checked_add(millis as i64))
+ .map(Timestamp::new_millisecond)
+ } else if nsec % 1000 == 0 {
+ let micros = nsec / 1000;
+ sec.checked_mul(1_000_000)
+ .and_then(|v| v.checked_add(micros as i64))
+ .map(Timestamp::new_microsecond)
+ } else {
+ sec.checked_mul(1_000_000_000)
+ .and_then(|v| v.checked_add(nsec as i64))
+ .map(Timestamp::new_nanosecond)
+ }
+ }
+
/// Format timestamp to ISO8601 string. If the timestamp exceeds what chrono timestamp can
/// represent, this function simply print the timestamp unit and value in plain string.
pub fn to_iso8601_string(&self) -> String {
@@ -205,6 +237,12 @@ impl Timestamp {
let (sec, nsec) = self.split();
NaiveDateTime::from_timestamp_opt(sec, nsec)
}
+
+ pub fn from_chrono_datetime(ndt: NaiveDateTime) -> Option<Self> {
+ let sec = ndt.timestamp();
+ let nsec = ndt.timestamp_subsec_nanos();
+ Timestamp::from_splits(sec, nsec)
+ }
}
impl FromStr for Timestamp {
@@ -225,13 +263,16 @@ impl FromStr for Timestamp {
// RFC3339 timestamp (with a T)
let s = s.trim();
if let Ok(ts) = DateTime::parse_from_rfc3339(s) {
- return Ok(Timestamp::new(ts.timestamp_nanos(), TimeUnit::Nanosecond));
+ return Timestamp::from_chrono_datetime(ts.naive_utc())
+ .context(ParseTimestampSnafu { raw: s });
}
if let Ok(ts) = DateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S%.f%:z") {
- return Ok(Timestamp::new(ts.timestamp_nanos(), TimeUnit::Nanosecond));
+ return Timestamp::from_chrono_datetime(ts.naive_utc())
+ .context(ParseTimestampSnafu { raw: s });
}
if let Ok(ts) = Utc.datetime_from_str(s, "%Y-%m-%d %H:%M:%S%.fZ") {
- return Ok(Timestamp::new(ts.timestamp_nanos(), TimeUnit::Nanosecond));
+ return Timestamp::from_chrono_datetime(ts.naive_utc())
+ .context(ParseTimestampSnafu { raw: s });
}
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S") {
@@ -264,7 +305,7 @@ fn naive_datetime_to_timestamp(
match local_datetime_to_utc(&datetime) {
LocalResult::None => ParseTimestampSnafu { raw: s }.fail(),
LocalResult::Single(utc) | LocalResult::Ambiguous(utc, _) => {
- Ok(Timestamp::new(utc.timestamp_nanos(), TimeUnit::Nanosecond))
+ Timestamp::from_chrono_datetime(utc).context(ParseTimestampSnafu { raw: s })
}
}
}
@@ -608,11 +649,7 @@ mod tests {
// but expected timestamp is in UTC timezone
fn check_from_str(s: &str, expect: &str) {
let ts = Timestamp::from_str(s).unwrap();
- let time = NaiveDateTime::from_timestamp_opt(
- ts.value / 1_000_000_000,
- (ts.value % 1_000_000_000) as u32,
- )
- .unwrap();
+ let time = ts.to_chrono_datetime().unwrap();
assert_eq!(expect, time.to_string());
}
@@ -1049,4 +1086,70 @@ mod tests {
TimeUnit::from(ArrowTimeUnit::Nanosecond)
);
}
+
+ fn check_conversion(ts: Timestamp, valid: bool) {
+ let Some(t2) = ts.to_chrono_datetime() else {
+ if valid {
+ panic!("Cannot convert {:?} to Chrono NaiveDateTime", ts);
+ }
+ return;
+ };
+ let Some(t3) = Timestamp::from_chrono_datetime(t2) else {
+ if valid {
+ panic!("Cannot convert Chrono NaiveDateTime {:?} to Timestamp", t2);
+ }
+ return;
+ };
+
+ assert_eq!(t3, ts);
+ }
+
+ #[test]
+ fn test_from_naive_date_time() {
+ let min_sec = Timestamp::new_second(-8334632851200);
+ let max_sec = Timestamp::new_second(8210298412799);
+ check_conversion(min_sec, true);
+ check_conversion(Timestamp::new_second(min_sec.value - 1), false);
+ check_conversion(max_sec, true);
+ check_conversion(Timestamp::new_second(max_sec.value + 1), false);
+
+ let min_millis = Timestamp::new_millisecond(-8334632851200000);
+ let max_millis = Timestamp::new_millisecond(8210298412799999);
+ check_conversion(min_millis, true);
+ check_conversion(Timestamp::new_millisecond(min_millis.value - 1), false);
+ check_conversion(max_millis, true);
+ check_conversion(Timestamp::new_millisecond(max_millis.value + 1), false);
+
+ let min_micros = Timestamp::new_microsecond(-8334632851200000000);
+ let max_micros = Timestamp::new_microsecond(8210298412799999999);
+ check_conversion(min_micros, true);
+ check_conversion(Timestamp::new_microsecond(min_micros.value - 1), false);
+ check_conversion(max_micros, true);
+ check_conversion(Timestamp::new_microsecond(max_micros.value + 1), false);
+
+ let min_nanos = Timestamp::new_nanosecond(-9223372036854775000);
+ let max_nanos = Timestamp::new_nanosecond(i64::MAX);
+ check_conversion(min_nanos, true);
+ check_conversion(Timestamp::new_nanosecond(min_nanos.value - 1), false);
+ check_conversion(max_nanos, true);
+ }
+
+ #[test]
+ fn test_parse_timestamp_range() {
+ let valid_strings = vec![
+ "-262144-01-01 00:00:00Z",
+ "+262143-12-31 23:59:59Z",
+ "-262144-01-01 00:00:00Z",
+ "+262143-12-31 23:59:59.999Z",
+ "-262144-01-01 00:00:00Z",
+ "+262143-12-31 23:59:59.999999Z",
+ "1677-09-21 00:12:43.145225Z",
+ "2262-04-11 23:47:16.854775807Z",
+ "+100000-01-01 00:00:01.5Z",
+ ];
+
+ for s in valid_strings {
+ Timestamp::from_str(s).unwrap();
+ }
+ }
}
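
The parsing fix above routes every branch through `Timestamp::from_chrono_datetime`, which relies on `from_splits` to pick the coarsest unit that still represents the sub-second part exactly, using checked arithmetic so out-of-range values become a parse error rather than a silent overflow. Here is a standalone sketch of that selection rule; the `Unit` enum is a hypothetical stand-in for the crate's `TimeUnit`.

```rust
// Sketch of the unit-selection rule in `Timestamp::from_splits`.
// `Unit` is a hypothetical stand-in for the crate's `TimeUnit`.

#[derive(Debug, PartialEq)]
enum Unit {
    Second,
    Millisecond,
    Microsecond,
    Nanosecond,
}

/// Picks the coarsest unit that represents `sec` + `nsec` exactly,
/// returning `None` if the value would overflow an i64 in that unit.
fn from_splits(sec: i64, nsec: u32) -> Option<(i64, Unit)> {
    if nsec == 0 {
        Some((sec, Unit::Second))
    } else if nsec % 1_000_000 == 0 {
        let millis = (nsec / 1_000_000) as i64;
        sec.checked_mul(1_000)
            .and_then(|v| v.checked_add(millis))
            .map(|v| (v, Unit::Millisecond))
    } else if nsec % 1_000 == 0 {
        let micros = (nsec / 1_000) as i64;
        sec.checked_mul(1_000_000)
            .and_then(|v| v.checked_add(micros))
            .map(|v| (v, Unit::Microsecond))
    } else {
        sec.checked_mul(1_000_000_000)
            .and_then(|v| v.checked_add(nsec as i64))
            .map(|v| (v, Unit::Nanosecond))
    }
}

fn main() {
    // A 0.5 s fraction only needs millisecond precision.
    assert_eq!(from_splits(10, 500_000_000), Some((10_500, Unit::Millisecond)));
    // Whole seconds stay in seconds, so even +262143-12-31 23:59:59 fits.
    assert_eq!(from_splits(8_210_298_412_799, 0), Some((8_210_298_412_799, Unit::Second)));
    // Nanosecond precision with a huge seconds part overflows -> None.
    assert_eq!(from_splits(i64::MAX / 1_000, 1), None);
}
```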
diff --git a/src/query/src/optimizer/type_conversion.rs b/src/query/src/optimizer/type_conversion.rs
index 64f999b50f8d..debdb881374d 100644
--- a/src/query/src/optimizer/type_conversion.rs
+++ b/src/query/src/optimizer/type_conversion.rs
@@ -276,14 +276,16 @@ fn timestamp_to_timestamp_ms_expr(val: i64, unit: TimeUnit) -> Expr {
}
fn string_to_timestamp_ms(string: &str) -> Result<ScalarValue> {
- Ok(ScalarValue::TimestampMillisecond(
- Some(
- Timestamp::from_str(string)
- .map(|t| t.value() / 1_000_000)
- .map_err(|e| DataFusionError::External(Box::new(e)))?,
- ),
- None,
- ))
+ let ts = Timestamp::from_str(string).map_err(|e| DataFusionError::External(Box::new(e)))?;
+
+ let value = Some(ts.value());
+ let scalar = match ts.unit() {
+ TimeUnit::Second => ScalarValue::TimestampSecond(value, None),
+ TimeUnit::Millisecond => ScalarValue::TimestampMillisecond(value, None),
+ TimeUnit::Microsecond => ScalarValue::TimestampMicrosecond(value, None),
+ TimeUnit::Nanosecond => ScalarValue::TimestampNanosecond(value, None),
+ };
+ Ok(scalar)
}
#[cfg(test)]
@@ -302,11 +304,11 @@ mod tests {
fn test_string_to_timestamp_ms() {
assert_eq!(
string_to_timestamp_ms("2022-02-02 19:00:00+08:00").unwrap(),
- ScalarValue::TimestampMillisecond(Some(1643799600000), None)
+ ScalarValue::TimestampSecond(Some(1643799600), None)
);
assert_eq!(
string_to_timestamp_ms("2009-02-13 23:31:30Z").unwrap(),
- ScalarValue::TimestampMillisecond(Some(1234567890000), None)
+ ScalarValue::TimestampSecond(Some(1234567890), None)
);
}
@@ -366,9 +368,10 @@ mod tests {
let mut converter = TypeConverter { schema };
assert_eq!(
- Expr::Column(Column::from_name("ts")).gt(Expr::Literal(
- ScalarValue::TimestampMillisecond(Some(1599514949000), None)
- )),
+ Expr::Column(Column::from_name("ts")).gt(Expr::Literal(ScalarValue::TimestampSecond(
+ Some(1599514949),
+ None
+ ))),
converter
.mutate(
Expr::Column(Column::from_name("ts")).gt(Expr::Literal(ScalarValue::Utf8(
@@ -440,7 +443,7 @@ mod tests {
.unwrap();
let expected = String::from(
"Aggregate: groupBy=[[]], aggr=[[COUNT(column1)]]\
- \n Filter: column3 > TimestampMillisecond(-28800000, None)\
+ \n Filter: column3 > TimestampSecond(-28800, None)\
\n Values: (Int64(1), Float64(1), TimestampMillisecond(1, None))",
);
assert_eq!(format!("{}", transformed_plan.display_indent()), expected);
diff --git a/src/storage/src/window_infer.rs b/src/storage/src/window_infer.rs
index 4505fe093117..35c06bb14470 100644
--- a/src/storage/src/window_infer.rs
+++ b/src/storage/src/window_infer.rs
@@ -23,17 +23,21 @@ use crate::memtable::MemtableStats;
use crate::sst::FileMeta;
/// A set of predefined time windows.
-const TIME_WINDOW_SIZE: [i64; 10] = [
- 1, // 1 second
- 60, // 1 minute
- 60 * 10, // 10 minutes
- 60 * 30, // 30 minutes
- 60 * 60, // 1 hour
- 2 * 60 * 60, // 2 hours
- 6 * 60 * 60, // 6 hours
- 12 * 60 * 60, // 12 hours
- 24 * 60 * 60, // 1 day
- 7 * 24 * 60 * 60, // 1 week
+const TIME_WINDOW_SIZE: [i64; 14] = [
+ 1, // 1 second
+ 60, // 1 minute
+ 60 * 10, // 10 minutes
+ 60 * 30, // 30 minutes
+ 60 * 60, // 1 hour
+ 2 * 60 * 60, // 2 hours
+ 6 * 60 * 60, // 6 hours
+ 12 * 60 * 60, // 12 hours
+ 24 * 60 * 60, // 1 day
+ 7 * 24 * 60 * 60, // 1 week
+ 30 * 24 * 60 * 60, // 1 month
+ 12 * 30 * 24 * 60 * 60, // 1 year
+ 10 * 12 * 30 * 24 * 60 * 60, // 10 years
+ 100 * 12 * 30 * 24 * 60 * 60, // 100 years
];
/// [WindowInfer] infers the time windows that can be used to optimize table scans ordered by
@@ -180,14 +184,8 @@ mod tests {
assert_eq!(12 * 60 * 60, duration_to_window_size(21601, 21601));
assert_eq!(24 * 60 * 60, duration_to_window_size(43201, 43201));
assert_eq!(7 * 24 * 60 * 60, duration_to_window_size(604799, 604799));
- assert_eq!(
- 7 * 24 * 60 * 60,
- duration_to_window_size(31535999, 31535999)
- );
- assert_eq!(
- 7 * 24 * 60 * 60,
- duration_to_window_size(i64::MAX, i64::MAX)
- );
+ assert_eq!(311040000, duration_to_window_size(31535999, 31535999));
+ assert_eq!(3110400000, duration_to_window_size(i64::MAX, i64::MAX));
}
#[test]
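
The extended window table above now reaches up to roughly a century. Judging from the updated test expectations, `duration_to_window_size` picks the smallest predefined window that covers the observed time span and saturates at the largest one; the sketch below approximates that lookup with a hypothetical single-argument `pick_window_size` helper (the real function takes additional arguments not modeled here).

```rust
// Sketch of the window-size lookup implied by the updated tests.
// The constants mirror the diff; `pick_window_size` is a hypothetical helper.

const TIME_WINDOW_SIZE: [i64; 14] = [
    1,                            // 1 second
    60,                           // 1 minute
    60 * 10,                      // 10 minutes
    60 * 30,                      // 30 minutes
    60 * 60,                      // 1 hour
    2 * 60 * 60,                  // 2 hours
    6 * 60 * 60,                  // 6 hours
    12 * 60 * 60,                 // 12 hours
    24 * 60 * 60,                 // 1 day
    7 * 24 * 60 * 60,             // 1 week
    30 * 24 * 60 * 60,            // 1 month
    12 * 30 * 24 * 60 * 60,       // 1 year
    10 * 12 * 30 * 24 * 60 * 60,  // 10 years
    100 * 12 * 30 * 24 * 60 * 60, // 100 years
];

/// Returns the smallest predefined window that covers `duration_secs`,
/// saturating at the largest window.
fn pick_window_size(duration_secs: i64) -> i64 {
    TIME_WINDOW_SIZE
        .iter()
        .copied()
        .find(|w| *w >= duration_secs)
        .unwrap_or(TIME_WINDOW_SIZE[TIME_WINDOW_SIZE.len() - 1])
}

fn main() {
    assert_eq!(pick_window_size(21_601), 12 * 60 * 60);     // just over 6 h -> 12 h
    assert_eq!(pick_window_size(31_535_999), 311_040_000);  // ~1 year -> 10 "years"
    assert_eq!(pick_window_size(i64::MAX), 3_110_400_000);  // saturates at 100 "years"
}
```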
diff --git a/tests/cases/standalone/common/timestamp/timestamp.result b/tests/cases/standalone/common/timestamp/timestamp.result
index 235bbddef4af..b52ee558fc94 100644
--- a/tests/cases/standalone/common/timestamp/timestamp.result
+++ b/tests/cases/standalone/common/timestamp/timestamp.result
@@ -26,14 +26,57 @@ INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('2023-04-04 08:00:00.0052+0
Affected Rows: 1
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('+100000-01-01 00:00:01.5Z', 3);
+
+Affected Rows: 1
+
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('-262144-01-01 00:00:00Z', 4);
+
+Affected Rows: 1
+
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('+262143-12-31 23:59:59Z', 5);
+
+Affected Rows: 1
+
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('-262144-01-01 00:00:00Z', 6);
+
+Affected Rows: 1
+
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('+262143-12-31 23:59:59.999Z', 7);
+
+Affected Rows: 1
+
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('-262144-01-01 00:00:00Z', 8);
+
+Affected Rows: 1
+
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('+262143-12-31 23:59:59.999999Z', 9);
+
+Affected Rows: 1
+
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('1677-09-21 00:12:43.145225Z', 10);
+
+Affected Rows: 1
+
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('2262-04-11 23:47:16.854775807Z', 11);
+
+Affected Rows: 1
+
SELECT * FROM timestamp_with_precision ORDER BY ts ASC;
-+----------------------------+-----+
-| ts | cnt |
-+----------------------------+-----+
-| 2023-04-04T00:00:00.005200 | 2 |
-| 2023-04-04T08:00:00.005200 | 1 |
-+----------------------------+-----+
++-------------------------------+-----+
+| ts | cnt |
++-------------------------------+-----+
+| -262144-01-01T00:00:00 | 8 |
+| 1677-09-21T00:12:43.145225 | 10 |
+| 2023-04-04T00:00:00.005200 | 2 |
+| 2023-04-04T08:00:00.005200 | 1 |
+| 2262-04-11T23:47:16.854775 | 11 |
+| +100000-01-01T00:00:01.500 | 3 |
+| +262143-12-31T23:59:59 | 5 |
+| +262143-12-31T23:59:59.999 | 7 |
+| +262143-12-31T23:59:59.999999 | 9 |
++-------------------------------+-----+
DROP TABLE timestamp_with_precision;
diff --git a/tests/cases/standalone/common/timestamp/timestamp.sql b/tests/cases/standalone/common/timestamp/timestamp.sql
index 2a650f2aa66a..19ac7e64e553 100644
--- a/tests/cases/standalone/common/timestamp/timestamp.sql
+++ b/tests/cases/standalone/common/timestamp/timestamp.sql
@@ -8,6 +8,24 @@ INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('2023-04-04 08:00:00.0052+0
INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('2023-04-04 08:00:00.0052+0800', 2);
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('+100000-01-01 00:00:01.5Z', 3);
+
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('-262144-01-01 00:00:00Z', 4);
+
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('+262143-12-31 23:59:59Z', 5);
+
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('-262144-01-01 00:00:00Z', 6);
+
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('+262143-12-31 23:59:59.999Z', 7);
+
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('-262144-01-01 00:00:00Z', 8);
+
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('+262143-12-31 23:59:59.999999Z', 9);
+
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('1677-09-21 00:12:43.145225Z', 10);
+
+INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('2262-04-11 23:47:16.854775807Z', 11);
+
SELECT * FROM timestamp_with_precision ORDER BY ts ASC;
DROP TABLE timestamp_with_precision;
|
fix
|
parse large timestamp (#2185)
|
e18416a726ecd4ba86098d5697d8721abdfa2937
|
2025-01-08 14:32:49
|
Ning Sun
|
ci: do not trigger tests when there is a merge conflict (#5318)
| false
|
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index f446ccfc6c0b..94528e9b8194 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -641,11 +641,19 @@ jobs:
- name: Run cargo clippy
run: make clippy
+ conflict-check:
+ name: Check for conflict
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Merge Conflict Finder
+ uses: olivernybroe/[email protected]
+
coverage:
if: github.event.pull_request.draft == false
runs-on: ubuntu-20.04-8-cores
timeout-minutes: 60
- needs: [clippy, fmt]
+ needs: [conflict-check, clippy, fmt]
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
@@ -658,6 +666,7 @@ jobs:
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
components: llvm-tools-preview
+ cache: false
# - name: Rust Cache
# uses: Swatinem/rust-cache@v2
# with:
|
ci
|
do not trigger tests when there is a merge conflict (#5318)
|
a58256d4d3f031470dd765a8cbc849dbbba6863a
|
2024-05-24 12:59:07
|
Ruihang Xia
|
feat: round-robin selector (#4024)
| false
|
diff --git a/src/common/meta/src/ddl/table_meta.rs b/src/common/meta/src/ddl/table_meta.rs
index 2b55315ec4c8..4ce4c1589411 100644
--- a/src/common/meta/src/ddl/table_meta.rs
+++ b/src/common/meta/src/ddl/table_meta.rs
@@ -184,10 +184,10 @@ impl TableMetadataAllocator {
pub type PeerAllocatorRef = Arc<dyn PeerAllocator>;
-/// [PeerAllocator] allocates [Peer]s for creating regions.
+/// [`PeerAllocator`] allocates [`Peer`]s for creating regions.
#[async_trait]
pub trait PeerAllocator: Send + Sync {
- /// Allocates `regions` size [Peer]s.
+ /// Allocates `regions` size [`Peer`]s.
async fn alloc(&self, ctx: &TableMetadataAllocatorContext, regions: usize)
-> Result<Vec<Peer>>;
}
diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs
index f5ca9174eace..b860db6a24f2 100644
--- a/src/meta-srv/src/bootstrap.rs
+++ b/src/meta-srv/src/bootstrap.rs
@@ -46,6 +46,7 @@ use crate::metasrv::builder::MetasrvBuilder;
use crate::metasrv::{Metasrv, MetasrvOptions, SelectorRef};
use crate::selector::lease_based::LeaseBasedSelector;
use crate::selector::load_based::LoadBasedSelector;
+use crate::selector::round_robin::RoundRobinSelector;
use crate::selector::SelectorType;
use crate::service::admin;
use crate::{error, Result};
@@ -228,6 +229,7 @@ pub async fn metasrv_builder(
let selector = match opts.selector {
SelectorType::LoadBased => Arc::new(LoadBasedSelector::default()) as SelectorRef,
SelectorType::LeaseBased => Arc::new(LeaseBasedSelector) as SelectorRef,
+ SelectorType::RoundRobin => Arc::new(RoundRobinSelector::default()) as SelectorRef,
};
Ok(MetasrvBuilder::new()
diff --git a/src/meta-srv/src/cluster.rs b/src/meta-srv/src/cluster.rs
index f7f7fac27722..8f037848635a 100644
--- a/src/meta-srv/src/cluster.rs
+++ b/src/meta-srv/src/cluster.rs
@@ -306,6 +306,11 @@ impl MetaPeerClient {
.map(|election| election.is_leader())
.unwrap_or(true)
}
+
+ #[cfg(test)]
+ pub(crate) fn memory_backend(&self) -> ResettableKvBackendRef {
+ self.in_memory.clone()
+ }
}
fn to_stat_kv_map(kvs: Vec<KeyValue>) -> Result<HashMap<StatKey, StatValue>> {
diff --git a/src/meta-srv/src/selector.rs b/src/meta-srv/src/selector.rs
index 4c3a91caef2b..8cc159445844 100644
--- a/src/meta-srv/src/selector.rs
+++ b/src/meta-srv/src/selector.rs
@@ -15,6 +15,7 @@
mod common;
pub mod lease_based;
pub mod load_based;
+pub mod round_robin;
mod weight_compute;
mod weighted_choose;
@@ -61,6 +62,7 @@ pub enum SelectorType {
#[default]
LoadBased,
LeaseBased,
+ RoundRobin,
}
impl TryFrom<&str> for SelectorType {
@@ -70,6 +72,7 @@ impl TryFrom<&str> for SelectorType {
match value {
"load_based" | "LoadBased" => Ok(SelectorType::LoadBased),
"lease_based" | "LeaseBased" => Ok(SelectorType::LeaseBased),
+ "round_robin" | "RoundRobin" => Ok(SelectorType::RoundRobin),
other => error::UnsupportedSelectorTypeSnafu {
selector_type: other,
}
diff --git a/src/meta-srv/src/selector/lease_based.rs b/src/meta-srv/src/selector/lease_based.rs
index bdfffacf0529..dabf5a0c8f53 100644
--- a/src/meta-srv/src/selector/lease_based.rs
+++ b/src/meta-srv/src/selector/lease_based.rs
@@ -21,6 +21,7 @@ use crate::selector::common::choose_peers;
use crate::selector::weighted_choose::{RandomWeightedChoose, WeightedItem};
use crate::selector::{Namespace, Selector, SelectorOptions};
+/// Selects among all alive datanodes using a random weighted choice.
pub struct LeaseBasedSelector;
#[async_trait::async_trait]
diff --git a/src/meta-srv/src/selector/round_robin.rs b/src/meta-srv/src/selector/round_robin.rs
new file mode 100644
index 000000000000..4355837fc7f1
--- /dev/null
+++ b/src/meta-srv/src/selector/round_robin.rs
@@ -0,0 +1,138 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::atomic::AtomicUsize;
+
+use common_meta::peer::Peer;
+use snafu::ensure;
+
+use crate::error::{NoEnoughAvailableDatanodeSnafu, Result};
+use crate::lease;
+use crate::metasrv::SelectorContext;
+use crate::selector::{Namespace, Selector, SelectorOptions};
+
+/// Round-robin selector that returns the next peer in the list in sequence.
+/// Datanodes are ordered by their node_id.
+///
+/// This selector is useful when you want to distribute the load evenly across
+/// all datanodes. But **it's not recommended** to use this selector in serious
+/// production environments because it doesn't take into account the load of
+/// each datanode.
+#[derive(Default)]
+pub struct RoundRobinSelector {
+ counter: AtomicUsize,
+}
+
+#[async_trait::async_trait]
+impl Selector for RoundRobinSelector {
+ type Context = SelectorContext;
+ type Output = Vec<Peer>;
+
+ async fn select(
+ &self,
+ ns: Namespace,
+ ctx: &Self::Context,
+ opts: SelectorOptions,
+ ) -> Result<Vec<Peer>> {
+ // 1. get alive datanodes.
+ let lease_kvs =
+ lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
+
+ // 2. map into peers and sort on node id
+ let mut peers: Vec<Peer> = lease_kvs
+ .into_iter()
+ .map(|(k, v)| Peer::new(k.node_id, v.node_addr))
+ .collect();
+ peers.sort_by_key(|p| p.id);
+ ensure!(
+ !peers.is_empty(),
+ NoEnoughAvailableDatanodeSnafu {
+ required: opts.min_required_items,
+ available: 0usize,
+ }
+ );
+
+ // 3. choose peers
+ let mut selected = Vec::with_capacity(opts.min_required_items);
+ for _ in 0..opts.min_required_items {
+ let idx = self
+ .counter
+ .fetch_add(1, std::sync::atomic::Ordering::Relaxed)
+ % peers.len();
+ selected.push(peers[idx].clone());
+ }
+
+ Ok(selected)
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use crate::test_util::{create_selector_context, put_datanodes};
+
+ #[tokio::test]
+ async fn test_round_robin_selector() {
+ let selector = RoundRobinSelector::default();
+ let ctx = create_selector_context();
+ let ns = 0;
+
+ // add three nodes
+ let peer1 = Peer {
+ id: 2,
+ addr: "node1".to_string(),
+ };
+ let peer2 = Peer {
+ id: 5,
+ addr: "node2".to_string(),
+ };
+ let peer3 = Peer {
+ id: 8,
+ addr: "node3".to_string(),
+ };
+ let peers = vec![peer1.clone(), peer2.clone(), peer3.clone()];
+ put_datanodes(ns, &ctx.meta_peer_client, peers).await;
+
+ let peers = selector
+ .select(
+ ns,
+ &ctx,
+ SelectorOptions {
+ min_required_items: 4,
+ allow_duplication: true,
+ },
+ )
+ .await
+ .unwrap();
+ assert_eq!(peers.len(), 4);
+ assert_eq!(
+ peers,
+ vec![peer1.clone(), peer2.clone(), peer3.clone(), peer1.clone()]
+ );
+
+ let peers = selector
+ .select(
+ ns,
+ &ctx,
+ SelectorOptions {
+ min_required_items: 2,
+ allow_duplication: true,
+ },
+ )
+ .await
+ .unwrap();
+ assert_eq!(peers.len(), 2);
+ assert_eq!(peers, vec![peer2.clone(), peer3.clone()]);
+ }
+}
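
The selector above keeps a shared `AtomicUsize` cursor and indexes the node-id-sorted peer list modulo its length, so successive allocations continue where the previous one stopped; that is why the second `select` in the test starts at `peer2`. A self-contained sketch of just that core loop, detached from the metasrv `Selector` trait and lease lookup (the `Peer` struct here is a simplified stand-in):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Simplified stand-in for the metasrv Peer type.
#[derive(Debug, Clone, PartialEq)]
struct Peer {
    id: u64,
    addr: String,
}

/// Core of the round-robin choice: a shared cursor over the peers,
/// advanced once per selected slot, wrapping modulo the peer count.
struct RoundRobin {
    counter: AtomicUsize,
}

impl RoundRobin {
    fn new() -> Self {
        Self { counter: AtomicUsize::new(0) }
    }

    fn select(&self, mut peers: Vec<Peer>, wanted: usize) -> Vec<Peer> {
        assert!(!peers.is_empty(), "no alive datanodes");
        // Order by node id so every call walks the peers in the same order.
        peers.sort_by_key(|p| p.id);
        (0..wanted)
            .map(|_| {
                let idx = self.counter.fetch_add(1, Ordering::Relaxed) % peers.len();
                peers[idx].clone()
            })
            .collect()
    }
}

fn main() {
    let rr = RoundRobin::new();
    let peers = vec![
        Peer { id: 2, addr: "node1".into() },
        Peer { id: 5, addr: "node2".into() },
        Peer { id: 8, addr: "node3".into() },
    ];
    // First allocation of 4 slots wraps around: 2, 5, 8, 2.
    let first: Vec<u64> = rr.select(peers.clone(), 4).iter().map(|p| p.id).collect();
    assert_eq!(first, vec![2, 5, 8, 2]);
    // The next allocation continues from the cursor: 5, 8.
    let second: Vec<u64> = rr.select(peers, 2).iter().map(|p| p.id).collect();
    assert_eq!(second, vec![5, 8]);
}
```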
diff --git a/src/meta-srv/src/table_meta_alloc.rs b/src/meta-srv/src/table_meta_alloc.rs
index 636db1b7d6b2..03cbff663a90 100644
--- a/src/meta-srv/src/table_meta_alloc.rs
+++ b/src/meta-srv/src/table_meta_alloc.rs
@@ -31,10 +31,18 @@ pub struct MetasrvPeerAllocator {
}
impl MetasrvPeerAllocator {
+ /// Creates a new [`MetasrvPeerAllocator`] with the given [`SelectorContext`] and [`SelectorRef`].
pub fn new(ctx: SelectorContext, selector: SelectorRef) -> Self {
Self { ctx, selector }
}
+ /// Allocates a specified number (by `regions`) of [`Peer`] instances based on the given
+ /// [`TableMetadataAllocatorContext`]. The returned list of peers has the same length as
+ /// the number of regions.
+ ///
+ /// This method is mainly a wrapper around the [`SelectorRef`]::`select` method. There is
+ /// no guarantee of how the returned peers are used, e.g. whether they are for the same
+ /// table or not, so this method isn't idempotent.
async fn alloc(
&self,
ctx: &TableMetadataAllocatorContext,
diff --git a/src/meta-srv/src/test_util.rs b/src/meta-srv/src/test_util.rs
index b6fa285311f6..0c9ae03f1f6a 100644
--- a/src/meta-srv/src/test_util.rs
+++ b/src/meta-srv/src/test_util.rs
@@ -24,14 +24,17 @@ use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
use common_meta::sequence::SequenceBuilder;
use common_meta::state_store::KvStateStore;
+use common_meta::ClusterId;
use common_procedure::local::{LocalManager, ManagerConfig};
+use common_time::util as time_util;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema};
use table::metadata::{RawTableInfo, RawTableMeta, TableIdent, TableType};
use table::requests::TableOptions;
-use crate::cluster::MetaPeerClientBuilder;
+use crate::cluster::{MetaPeerClientBuilder, MetaPeerClientRef};
use crate::handler::{HeartbeatMailbox, Pushers};
+use crate::keys::{LeaseKey, LeaseValue};
use crate::lock::memory::MemLock;
use crate::metasrv::SelectorContext;
use crate::procedure::region_failover::RegionFailoverManager;
@@ -54,17 +57,9 @@ pub(crate) fn new_region_route(region_id: u64, peers: &[Peer], leader_node: u64)
}
}
-pub(crate) fn create_region_failover_manager() -> Arc<RegionFailoverManager> {
- let kv_backend = Arc::new(MemoryKvBackend::new());
-
- let pushers = Pushers::default();
- let mailbox_sequence =
- SequenceBuilder::new("test_heartbeat_mailbox", kv_backend.clone()).build();
- let mailbox = HeartbeatMailbox::create(pushers, mailbox_sequence);
-
- let state_store = Arc::new(KvStateStore::new(kv_backend.clone()));
- let procedure_manager = Arc::new(LocalManager::new(ManagerConfig::default(), state_store));
-
+/// Builds and returns a [`SelectorContext`]. To access its inner state,
+/// use `memory_backend` on [`MetaPeerClientRef`].
+pub(crate) fn create_selector_context() -> SelectorContext {
let in_memory = Arc::new(MemoryKvBackend::new());
let meta_peer_client = MetaPeerClientBuilder::default()
.election(None)
@@ -74,15 +69,30 @@ pub(crate) fn create_region_failover_manager() -> Arc<RegionFailoverManager> {
// Safety: all required fields set at initialization
.unwrap();
- let selector = Arc::new(LeaseBasedSelector);
- let selector_ctx = SelectorContext {
+ SelectorContext {
datanode_lease_secs: 10,
server_addr: "127.0.0.1:3002".to_string(),
- kv_backend: kv_backend.clone(),
+ kv_backend: in_memory,
meta_peer_client,
table_id: None,
- };
+ }
+}
+
+pub(crate) fn create_region_failover_manager() -> Arc<RegionFailoverManager> {
+ let kv_backend = Arc::new(MemoryKvBackend::new());
+
+ let pushers = Pushers::default();
+ let mailbox_sequence =
+ SequenceBuilder::new("test_heartbeat_mailbox", kv_backend.clone()).build();
+ let mailbox = HeartbeatMailbox::create(pushers, mailbox_sequence);
+
+ let state_store = Arc::new(KvStateStore::new(kv_backend.clone()));
+ let procedure_manager = Arc::new(LocalManager::new(ManagerConfig::default(), state_store));
+ let selector = Arc::new(LeaseBasedSelector);
+ let selector_ctx = create_selector_context();
+
+ let in_memory = Arc::new(MemoryKvBackend::new());
Arc::new(RegionFailoverManager::new(
10,
in_memory,
@@ -157,3 +167,29 @@ pub(crate) async fn prepare_table_region_and_info_value(
.await
.unwrap();
}
+
+pub(crate) async fn put_datanodes(
+ cluster_id: ClusterId,
+ meta_peer_client: &MetaPeerClientRef,
+ datanodes: Vec<Peer>,
+) {
+ let backend = meta_peer_client.memory_backend();
+ for datanode in datanodes {
+ let lease_key = LeaseKey {
+ cluster_id,
+ node_id: datanode.id,
+ };
+ let lease_value = LeaseValue {
+ timestamp_millis: time_util::current_time_millis(),
+ node_addr: datanode.addr,
+ };
+ let lease_key_bytes: Vec<u8> = lease_key.try_into().unwrap();
+ let lease_value_bytes: Vec<u8> = lease_value.try_into().unwrap();
+ let put_request = common_meta::rpc::store::PutRequest {
+ key: lease_key_bytes,
+ value: lease_value_bytes,
+ ..Default::default()
+ };
+ backend.put(put_request).await.unwrap();
+ }
+}
|
feat
|
round-robin selector (#4024)
|
baef640fe357d23bed217ac33ae0de26e9434cb4
|
2022-11-28 14:37:17
|
Zheming Li
|
feat: add --version command line option (#632)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index df23ceff8278..9db13d21a68b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -718,6 +718,17 @@ dependencies = [
"serde",
]
+[[package]]
+name = "build-data"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a94f9f7aab679acac7ce29ba5581c00d3971a861c3b501c5bb74c3ba0026d90"
+dependencies = [
+ "chrono",
+ "safe-lock",
+ "safe-regex",
+]
+
[[package]]
name = "bumpalo"
version = "3.11.0"
@@ -1105,6 +1116,7 @@ dependencies = [
name = "cmd"
version = "0.1.0"
dependencies = [
+ "build-data",
"clap 3.2.22",
"common-error",
"common-telemetry",
@@ -5190,6 +5202,59 @@ version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
+[[package]]
+name = "safe-lock"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "077d73db7973cccf63eb4aff1e5a34dc2459baa867512088269ea5f2f4253c90"
+
+[[package]]
+name = "safe-proc-macro2"
+version = "1.0.36"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "814c536dcd27acf03296c618dab7ad62d28e70abd7ba41d3f34a2ce707a2c666"
+dependencies = [
+ "unicode-xid",
+]
+
+[[package]]
+name = "safe-quote"
+version = "1.0.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77e530f7831f3feafcd5f1aae406ac205dd998436b4007c8e80f03eca78a88f7"
+dependencies = [
+ "safe-proc-macro2",
+]
+
+[[package]]
+name = "safe-regex"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a15289bf322e0673d52756a18194167f2378ec1a15fe884af6e2d2cb934822b0"
+dependencies = [
+ "safe-regex-macro",
+]
+
+[[package]]
+name = "safe-regex-compiler"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fba76fae590a2aa665279deb1f57b5098cbace01a0c5e60e262fcf55f7c51542"
+dependencies = [
+ "safe-proc-macro2",
+ "safe-quote",
+]
+
+[[package]]
+name = "safe-regex-macro"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96c2e96b5c03f158d1b16ba79af515137795f4ad4e8de3f790518aae91f1d127"
+dependencies = [
+ "safe-proc-macro2",
+ "safe-regex-compiler",
+]
+
[[package]]
name = "same-file"
version = "1.0.6"
@@ -6802,6 +6867,12 @@ version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
+[[package]]
+name = "unicode-xid"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"
+
[[package]]
name = "unicode_names2"
version = "0.5.1"
diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml
index c446180738c2..8c8caab22857 100644
--- a/src/cmd/Cargo.toml
+++ b/src/cmd/Cargo.toml
@@ -29,3 +29,6 @@ toml = "0.5"
[dev-dependencies]
serde = "1.0"
tempdir = "0.3"
+
+[build-dependencies]
+build-data = "0.1.3"
diff --git a/src/cmd/build.rs b/src/cmd/build.rs
new file mode 100644
index 000000000000..15d858e8479f
--- /dev/null
+++ b/src/cmd/build.rs
@@ -0,0 +1,19 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+fn main() {
+ build_data::set_GIT_BRANCH();
+ build_data::set_GIT_COMMIT();
+ build_data::set_GIT_DIRTY();
+}
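The `build_data::set_*` calls above are expected to emit cargo `rustc-env` directives from the build script, which is what lets the `env!("GIT_BRANCH")`-style lookups below resolve at compile time. A rough hand-written equivalent, with values hard-coded purely for illustration (the real crate reads them from git):

// Hypothetical build.rs equivalent of the three build-data calls.
fn main() {
    println!("cargo:rustc-env=GIT_BRANCH=main");
    println!("cargo:rustc-env=GIT_COMMIT=0123abcd");
    println!("cargo:rustc-env=GIT_DIRTY=false");
}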
diff --git a/src/cmd/src/bin/greptime.rs b/src/cmd/src/bin/greptime.rs
index 4beb4b805d99..578bee7e3bf2 100644
--- a/src/cmd/src/bin/greptime.rs
+++ b/src/cmd/src/bin/greptime.rs
@@ -20,7 +20,7 @@ use cmd::{datanode, frontend, metasrv, standalone};
use common_telemetry::logging::{error, info};
#[derive(Parser)]
-#[clap(name = "greptimedb")]
+#[clap(name = "greptimedb", version = print_version())]
struct Command {
#[clap(long, default_value = "/tmp/greptimedb/logs")]
log_dir: String,
@@ -70,6 +70,17 @@ impl fmt::Display for SubCommand {
}
}
+fn print_version() -> &'static str {
+ concat!(
+ "\nbranch: ",
+ env!("GIT_BRANCH"),
+ "\ncommit: ",
+ env!("GIT_COMMIT"),
+ "\ndirty: ",
+ env!("GIT_DIRTY")
+ )
+}
+
#[tokio::main]
async fn main() -> Result<()> {
let cmd = Command::parse();
|
feat
|
add --version command line option (#632)
|
971229517705f26b2a11212f5a85f279cab2283e
|
2024-10-30 14:01:31
|
Ruihang Xia
|
fix(config): update tracing section headers in example TOML files (#4898)
| false
|
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index 557cd4cef02d..6e426e89cdcd 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -646,7 +646,7 @@ url = ""
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
-[tracing]
+#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
-tokio_console_addr = "127.0.0.1"
+#+ tokio_console_addr = "127.0.0.1"
diff --git a/config/flownode.example.toml b/config/flownode.example.toml
index 34825542fa06..ffa992436521 100644
--- a/config/flownode.example.toml
+++ b/config/flownode.example.toml
@@ -101,8 +101,8 @@ threshold = "10s"
sample_ratio = 1.0
## The tracing options. Only effect when compiled with `tokio-console` feature.
-[tracing]
+#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
-tokio_console_addr = "127.0.0.1"
+#+ tokio_console_addr = "127.0.0.1"
diff --git a/config/frontend.example.toml b/config/frontend.example.toml
index 83e7808d4667..1fb372a6d12e 100644
--- a/config/frontend.example.toml
+++ b/config/frontend.example.toml
@@ -231,7 +231,7 @@ url = ""
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
-[tracing]
+#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
-tokio_console_addr = "127.0.0.1"
+#+ tokio_console_addr = "127.0.0.1"
diff --git a/config/metasrv.example.toml b/config/metasrv.example.toml
index 416f5ee6ef23..b80d1c164e0e 100644
--- a/config/metasrv.example.toml
+++ b/config/metasrv.example.toml
@@ -218,7 +218,7 @@ url = ""
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
-[tracing]
+#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
-tokio_console_addr = "127.0.0.1"
+#+ tokio_console_addr = "127.0.0.1"
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index defd34d8f598..52f6d5b694a1 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -690,7 +690,7 @@ url = ""
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
-[tracing]
+#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
-tokio_console_addr = "127.0.0.1"
+#+ tokio_console_addr = "127.0.0.1"
|
fix
|
update tracing section headers in example TOML files (#4898)
|
1e815dddf1983f4f5100b9978d1eabfb47548665
|
2024-06-25 13:30:48
|
Zhenchi
|
feat(puffin): implement CachedPuffinWriter (#4203)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 3c0924d682b5..f740010071dc 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -573,6 +573,18 @@ dependencies = [
"futures-core",
]
+[[package]]
+name = "async-channel"
+version = "2.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a"
+dependencies = [
+ "concurrent-queue",
+ "event-listener-strategy",
+ "futures-core",
+ "pin-project-lite",
+]
+
[[package]]
name = "async-compression"
version = "0.3.15"
@@ -610,6 +622,17 @@ dependencies = [
"zstd-safe 7.1.0",
]
+[[package]]
+name = "async-fs"
+version = "2.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ebcd09b382f40fcd159c2d695175b2ae620ffa5f3bd6f664131efff4e8b9e04a"
+dependencies = [
+ "async-lock",
+ "blocking",
+ "futures-lite",
+]
+
[[package]]
name = "async-lock"
version = "3.4.0"
@@ -654,6 +677,12 @@ dependencies = [
"syn 2.0.66",
]
+[[package]]
+name = "async-task"
+version = "4.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de"
+
[[package]]
name = "async-trait"
version = "0.1.80"
@@ -665,6 +694,17 @@ dependencies = [
"syn 2.0.66",
]
+[[package]]
+name = "async-walkdir"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "20235b6899dd1cb74a9afac0abf5b4a20c0e500dd6537280f4096e1b9f14da20"
+dependencies = [
+ "async-fs",
+ "futures-lite",
+ "thiserror",
+]
+
[[package]]
name = "asynchronous-codec"
version = "0.7.0"
@@ -702,6 +742,12 @@ version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba"
+[[package]]
+name = "atomic-waker"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
+
[[package]]
name = "atty"
version = "0.2.14"
@@ -1020,6 +1066,19 @@ dependencies = [
"generic-array",
]
+[[package]]
+name = "blocking"
+version = "1.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea"
+dependencies = [
+ "async-channel 2.3.1",
+ "async-task",
+ "futures-io",
+ "futures-lite",
+ "piper",
+]
+
[[package]]
name = "borsh"
version = "1.5.1"
@@ -4181,6 +4240,19 @@ version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1"
+[[package]]
+name = "futures-lite"
+version = "2.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5"
+dependencies = [
+ "fastrand",
+ "futures-core",
+ "futures-io",
+ "parking",
+ "pin-project-lite",
+]
+
[[package]]
name = "futures-macro"
version = "0.3.30"
@@ -6283,7 +6355,7 @@ version = "0.8.2"
dependencies = [
"api",
"aquamarine",
- "async-channel",
+ "async-channel 1.9.0",
"async-stream",
"async-trait",
"bytes",
@@ -7794,6 +7866,17 @@ dependencies = [
"yaml-rust",
]
+[[package]]
+name = "piper"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae1d5c74c9876f070d3e8fd503d748c7d974c3e48da8f41350fa5222ef9b4391"
+dependencies = [
+ "atomic-waker",
+ "fastrand",
+ "futures-io",
+]
+
[[package]]
name = "pkcs1"
version = "0.3.3"
@@ -8377,7 +8460,9 @@ dependencies = [
name = "puffin"
version = "0.8.2"
dependencies = [
+ "async-compression 0.4.11",
"async-trait",
+ "async-walkdir",
"bitflags 2.5.0",
"common-error",
"common-macro",
@@ -8390,6 +8475,7 @@ dependencies = [
"snafu 0.8.3",
"tokio",
"tokio-util",
+ "uuid",
]
[[package]]
diff --git a/src/mito2/src/sst/index.rs b/src/mito2/src/sst/index.rs
index cb10e7fc912a..ebc561c82973 100644
--- a/src/mito2/src/sst/index.rs
+++ b/src/mito2/src/sst/index.rs
@@ -77,7 +77,7 @@ impl Indexer {
/// Finish the index creation.
/// Returns the number of bytes written if success or None if failed.
- pub async fn finish(&mut self) -> Option<usize> {
+ pub async fn finish(&mut self) -> Option<u64> {
if let Some(mut creator) = self.inner.take() {
match creator.finish().await {
Ok((row_count, byte_count)) => {
diff --git a/src/mito2/src/sst/index/applier.rs b/src/mito2/src/sst/index/applier.rs
index eb4e42cd47bf..aba4534b2847 100644
--- a/src/mito2/src/sst/index/applier.rs
+++ b/src/mito2/src/sst/index/applier.rs
@@ -208,8 +208,9 @@ mod tests {
puffin_writer
.add_blob(Blob {
blob_type: INDEX_BLOB_TYPE.to_string(),
- data: Cursor::new(vec![]),
+ compressed_data: Cursor::new(vec![]),
properties: Default::default(),
+ compression_codec: None,
})
.await
.unwrap();
@@ -260,8 +261,9 @@ mod tests {
puffin_writer
.add_blob(Blob {
blob_type: "invalid_blob_type".to_string(),
- data: Cursor::new(vec![]),
+ compressed_data: Cursor::new(vec![]),
properties: Default::default(),
+ compression_codec: None,
})
.await
.unwrap();
diff --git a/src/mito2/src/sst/index/creator.rs b/src/mito2/src/sst/index/creator.rs
index 45b58f858eca..bdad03c5d362 100644
--- a/src/mito2/src/sst/index/creator.rs
+++ b/src/mito2/src/sst/index/creator.rs
@@ -54,7 +54,7 @@ const MIN_MEMORY_USAGE_THRESHOLD_PER_COLUMN: usize = 1024 * 1024; // 1MB
/// The buffer size for the pipe used to send index data to the puffin blob.
const PIPE_BUFFER_SIZE_FOR_SENDING_BLOB: usize = 8192;
-type ByteCount = usize;
+type ByteCount = u64;
type RowCount = usize;
/// Creates SST index.
@@ -271,8 +271,9 @@ impl SstIndexCreator {
let (tx, rx) = duplex(PIPE_BUFFER_SIZE_FOR_SENDING_BLOB);
let blob = Blob {
blob_type: INDEX_BLOB_TYPE.to_string(),
- data: rx.compat(),
+ compressed_data: rx.compat(),
properties: HashMap::default(),
+ compression_codec: None,
};
let mut index_writer = InvertedIndexBlobWriter::new(tx.compat_write());
@@ -292,7 +293,7 @@ impl SstIndexCreator {
.fail()?,
(Ok(_), e @ Err(_)) => e?,
- (e @ Err(_), Ok(_)) => e?,
+ (e @ Err(_), Ok(_)) => e.map(|_| ())?,
_ => {}
}
diff --git a/src/mito2/src/sst/index/creator/statistics.rs b/src/mito2/src/sst/index/creator/statistics.rs
index 65d01547e980..60cabe44e8d2 100644
--- a/src/mito2/src/sst/index/creator/statistics.rs
+++ b/src/mito2/src/sst/index/creator/statistics.rs
@@ -35,7 +35,7 @@ pub(crate) struct Statistics {
/// Number of rows in the index.
row_count: usize,
/// Number of bytes in the index.
- byte_count: usize,
+ byte_count: u64,
}
impl Statistics {
@@ -63,7 +63,7 @@ impl Statistics {
}
/// Returns byte count.
- pub fn byte_count(&self) -> usize {
+ pub fn byte_count(&self) -> u64 {
self.byte_count
}
}
@@ -112,7 +112,7 @@ impl<'a> TimerGuard<'a> {
}
/// Increases the byte count of the index creation statistics.
- pub fn inc_byte_count(&mut self, n: usize) {
+ pub fn inc_byte_count(&mut self, n: u64) {
self.stats.byte_count += n;
}
}
diff --git a/src/mito2/src/sst/parquet/writer.rs b/src/mito2/src/sst/parquet/writer.rs
index e312d2eaffb7..9a3d852f9cfe 100644
--- a/src/mito2/src/sst/parquet/writer.rs
+++ b/src/mito2/src/sst/parquet/writer.rs
@@ -136,7 +136,7 @@ where
let index_size = self.indexer.finish().await;
let inverted_index_available = index_size.is_some();
- let index_file_size = index_size.unwrap_or(0) as u64;
+ let index_file_size = index_size.unwrap_or(0);
if stats.num_rows == 0 {
return Ok(None);
diff --git a/src/puffin/Cargo.toml b/src/puffin/Cargo.toml
index fea00dc0ba47..5e1a83f6ab7a 100644
--- a/src/puffin/Cargo.toml
+++ b/src/puffin/Cargo.toml
@@ -8,7 +8,9 @@ license.workspace = true
workspace = true
[dependencies]
+async-compression = "0.4.11"
async-trait.workspace = true
+async-walkdir = "2.0.0"
bitflags.workspace = true
common-error.workspace = true
common-macro.workspace = true
@@ -19,7 +21,6 @@ pin-project.workspace = true
serde.workspace = true
serde_json.workspace = true
snafu.workspace = true
-
-[dev-dependencies]
tokio.workspace = true
tokio-util.workspace = true
+uuid.workspace = true
diff --git a/src/puffin/src/blob_metadata.rs b/src/puffin/src/blob_metadata.rs
index fd7d106e02ab..1cdadb592f77 100644
--- a/src/puffin/src/blob_metadata.rs
+++ b/src/puffin/src/blob_metadata.rs
@@ -69,7 +69,7 @@ pub struct BlobMetadata {
}
/// Compression codec used to compress the blob
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum CompressionCodec {
/// Single [LZ4 compression frame](https://github.com/lz4/lz4/blob/77d1b93f72628af7bbde0243b4bba9205c3138d9/doc/lz4_Frame_format.md),
diff --git a/src/puffin/src/error.rs b/src/puffin/src/error.rs
index cf861322322f..86f08948f7ff 100644
--- a/src/puffin/src/error.rs
+++ b/src/puffin/src/error.rs
@@ -64,6 +64,30 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Failed to open"))]
+ Open {
+ #[snafu(source)]
+ error: IoError,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Failed to read metadata"))]
+ Metadata {
+ #[snafu(source)]
+ error: IoError,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Error while walking directory"))]
+ WalkDirError {
+ #[snafu(source)]
+ error: async_walkdir::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("Magic not matched"))]
MagicNotMatched {
#[snafu(implicit)]
@@ -159,6 +183,20 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+
+ #[snafu(display("Unsupported compression: {codec}"))]
+ UnsupportedCompression {
+ codec: String,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Write to the same blob twice: {blob}"))]
+ DuplicateBlob {
+ blob: String,
+ #[snafu(implicit)]
+ location: Location,
+ },
}
impl ErrorExt for Error {
@@ -172,6 +210,8 @@ impl ErrorExt for Error {
| Write { .. }
| Flush { .. }
| Close { .. }
+ | Open { .. }
+ | Metadata { .. }
| SerializeJson { .. }
| BytesToInteger { .. }
| ParseStageNotMatch { .. }
@@ -180,9 +220,14 @@ impl ErrorExt for Error {
| InvalidBlobOffset { .. }
| InvalidBlobAreaEnd { .. }
| Lz4Compression { .. }
- | Lz4Decompression { .. } => StatusCode::Unexpected,
+ | Lz4Decompression { .. }
+ | WalkDirError { .. } => StatusCode::Unexpected,
+
+ UnsupportedCompression { .. } | UnsupportedDecompression { .. } => {
+ StatusCode::Unsupported
+ }
- UnsupportedDecompression { .. } => StatusCode::Unsupported,
+ DuplicateBlob { .. } => StatusCode::InvalidArguments,
}
}
diff --git a/src/puffin/src/file_format/writer.rs b/src/puffin/src/file_format/writer.rs
index 7215fa6f6bd0..bfe717ae49ba 100644
--- a/src/puffin/src/file_format/writer.rs
+++ b/src/puffin/src/file_format/writer.rs
@@ -19,6 +19,7 @@ use std::collections::HashMap;
use async_trait::async_trait;
+use crate::blob_metadata::CompressionCodec;
use crate::error::Result;
pub use crate::file_format::writer::file::PuffinFileWriter;
@@ -30,7 +31,10 @@ pub struct Blob<R> {
pub blob_type: String,
/// The data of the blob
- pub data: R,
+ pub compressed_data: R,
+
+ /// The codec used to compress the blob.
+ pub compression_codec: Option<CompressionCodec>,
/// The properties of the blob
pub properties: HashMap<String, String>,
@@ -45,10 +49,10 @@ pub trait PuffinSyncWriter {
fn set_footer_lz4_compressed(&mut self, lz4_compressed: bool);
/// Add a blob to the Puffin file
- fn add_blob<R: std::io::Read>(&mut self, blob: Blob<R>) -> Result<()>;
+ fn add_blob<R: std::io::Read>(&mut self, blob: Blob<R>) -> Result<u64>;
/// Finish writing the Puffin file, returns the number of bytes written
- fn finish(&mut self) -> Result<usize>;
+ fn finish(&mut self) -> Result<u64>;
}
/// The trait for writing Puffin files asynchronously
@@ -61,8 +65,8 @@ pub trait PuffinAsyncWriter {
fn set_footer_lz4_compressed(&mut self, lz4_compressed: bool);
/// Add a blob to the Puffin file
- async fn add_blob<R: futures::AsyncRead + Send>(&mut self, blob: Blob<R>) -> Result<()>;
+ async fn add_blob<R: futures::AsyncRead + Send>(&mut self, blob: Blob<R>) -> Result<u64>;
/// Finish writing the Puffin file, returns the number of bytes written
- async fn finish(&mut self) -> Result<usize>;
+ async fn finish(&mut self) -> Result<u64>;
}
diff --git a/src/puffin/src/file_format/writer/file.rs b/src/puffin/src/file_format/writer/file.rs
index d10c2aeddeb8..6237453dc6c6 100644
--- a/src/puffin/src/file_format/writer/file.rs
+++ b/src/puffin/src/file_format/writer/file.rs
@@ -75,28 +75,28 @@ impl<W: io::Write> PuffinSyncWriter for PuffinFileWriter<W> {
self.properties = properties;
}
- fn add_blob<R: io::Read>(&mut self, mut blob: Blob<R>) -> Result<()> {
+ fn add_blob<R: io::Read>(&mut self, mut blob: Blob<R>) -> Result<u64> {
self.write_header_if_needed_sync()?;
- let size = io::copy(&mut blob.data, &mut self.writer).context(WriteSnafu)?;
+ let size = io::copy(&mut blob.compressed_data, &mut self.writer).context(WriteSnafu)?;
let blob_metadata = self.create_blob_metadata(blob.blob_type, blob.properties, size);
self.blob_metadata.push(blob_metadata);
self.written_bytes += size;
- Ok(())
+ Ok(size)
}
fn set_footer_lz4_compressed(&mut self, lz4_compressed: bool) {
self.footer_lz4_compressed = lz4_compressed;
}
- fn finish(&mut self) -> Result<usize> {
+ fn finish(&mut self) -> Result<u64> {
self.write_header_if_needed_sync()?;
self.write_footer_sync()?;
self.writer.flush().context(FlushSnafu)?;
- Ok(self.written_bytes as usize)
+ Ok(self.written_bytes)
}
}
@@ -106,10 +106,10 @@ impl<W: AsyncWrite + Unpin + Send> PuffinAsyncWriter for PuffinFileWriter<W> {
self.properties = properties;
}
- async fn add_blob<R: AsyncRead + Send>(&mut self, blob: Blob<R>) -> Result<()> {
+ async fn add_blob<R: AsyncRead + Send>(&mut self, blob: Blob<R>) -> Result<u64> {
self.write_header_if_needed_async().await?;
- let size = futures::io::copy(blob.data, &mut self.writer)
+ let size = futures::io::copy(blob.compressed_data, &mut self.writer)
.await
.context(WriteSnafu)?;
@@ -117,20 +117,20 @@ impl<W: AsyncWrite + Unpin + Send> PuffinAsyncWriter for PuffinFileWriter<W> {
self.blob_metadata.push(blob_metadata);
self.written_bytes += size;
- Ok(())
+ Ok(size)
}
fn set_footer_lz4_compressed(&mut self, lz4_compressed: bool) {
self.footer_lz4_compressed = lz4_compressed;
}
- async fn finish(&mut self) -> Result<usize> {
+ async fn finish(&mut self) -> Result<u64> {
self.write_header_if_needed_async().await?;
self.write_footer_async().await?;
self.writer.flush().await.context(FlushSnafu)?;
self.writer.close().await.context(CloseSnafu)?;
- Ok(self.written_bytes as usize)
+ Ok(self.written_bytes)
}
}
diff --git a/src/puffin/src/lib.rs b/src/puffin/src/lib.rs
index 2be956e43dc3..96a8421f98b5 100644
--- a/src/puffin/src/lib.rs
+++ b/src/puffin/src/lib.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#![feature(trait_alias)]
+
pub mod blob_metadata;
pub mod error;
pub mod file_format;
diff --git a/src/puffin/src/puffin_manager.rs b/src/puffin/src/puffin_manager.rs
index cf3831b58226..933c974ee672 100644
--- a/src/puffin/src/puffin_manager.rs
+++ b/src/puffin/src/puffin_manager.rs
@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+pub mod cache_manager;
+pub mod cached_puffin_manager;
+
use std::path::PathBuf;
use async_trait::async_trait;
@@ -37,11 +40,14 @@ pub trait PuffinManager {
#[async_trait]
pub trait PuffinWriter {
/// Writes a blob associated with the specified `key` to the Puffin file.
+ /// Returns the number of bytes written.
async fn put_blob<R>(&mut self, key: &str, raw_data: R, options: PutOptions) -> Result<u64>
where
R: AsyncRead + Send;
/// Writes a directory associated with the specified `key` to the Puffin file.
+ /// Returns the number of bytes written.
+ ///
/// The specified `dir` should be accessible from the filesystem.
async fn put_dir(&mut self, key: &str, dir: PathBuf, options: PutOptions) -> Result<u64>;
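A minimal usage sketch of the `PuffinWriter` trait above; `PutOptions::default()` (i.e. no compression) is an assumption for illustration, since only the `compression` option appears in this diff:

async fn write_one_blob<W: PuffinWriter>(writer: &mut W) -> Result<u64> {
    // `&[u8]` implements `futures::AsyncRead`; the call resolves to the number
    // of bytes written into the puffin file.
    writer
        .put_blob("some-blob-key", &b"raw bytes"[..], PutOptions::default())
        .await
}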
diff --git a/src/puffin/src/puffin_manager/cache_manager.rs b/src/puffin/src/puffin_manager/cache_manager.rs
new file mode 100644
index 000000000000..e71ae5141d71
--- /dev/null
+++ b/src/puffin/src/puffin_manager/cache_manager.rs
@@ -0,0 +1,81 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::path::PathBuf;
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use futures::future::BoxFuture;
+use futures::{AsyncRead, AsyncSeek, AsyncWrite};
+
+use crate::error::Result;
+
+pub type BoxWriter = Box<dyn AsyncWrite + Unpin + Send>;
+
+/// Result containing the number of bytes written (u64).
+pub type WriteResult = BoxFuture<'static, Result<u64>>;
+
+/// `DirWriterProvider` provides a way to write files into a directory.
+#[async_trait]
+pub trait DirWriterProvider {
+ /// Creates a writer for the given relative path.
+ async fn writer(&self, relative_path: &str) -> Result<BoxWriter>;
+}
+
+pub type DirWriterProviderRef = Box<dyn DirWriterProvider + Send>;
+
+/// Function that initializes a blob.
+///
+/// `CacheManager` will provide a `BoxWriter` that the caller of `get_blob`
+/// can use to write the blob into the cache.
+pub trait InitBlobFn = FnOnce(BoxWriter) -> WriteResult;
+
+/// Function that initializes a directory.
+///
+/// `CacheManager` will provide a `DirWriterProvider` that the caller of `get_dir`
+/// can use to write files inside the directory into the cache.
+pub trait InitDirFn = FnOnce(DirWriterProviderRef) -> WriteResult;
+
+/// `CacheManager` manages the cache for the puffin files.
+#[async_trait]
+pub trait CacheManager {
+ type Reader: AsyncRead + AsyncSeek;
+
+ /// Retrieves a blob, initializing it if necessary using the provided `init_fn`.
+ async fn get_blob<'a>(
+ &self,
+ puffin_file_name: &str,
+ key: &str,
+ init_factory: Box<dyn InitBlobFn + Send + 'a>,
+ ) -> Result<Self::Reader>;
+
+ /// Retrieves a directory, initializing it if necessary using the provided `init_fn`.
+ async fn get_dir<'a>(
+ &self,
+ puffin_file_name: &str,
+ key: &str,
+ init_fn: Box<dyn InitDirFn + Send + 'a>,
+ ) -> Result<PathBuf>;
+
+ /// Stores a directory in the cache.
+ async fn put_dir(
+ &self,
+ puffin_file_name: &str,
+ key: &str,
+ dir_path: PathBuf,
+ dir_size: u64,
+ ) -> Result<()>;
+}
+
+pub type CacheManagerRef<R> = Arc<dyn CacheManager<Reader = R> + Send + Sync>;
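A minimal sketch of how a caller might hand an `InitBlobFn` to `CacheManager::get_blob`, assuming the items defined above are in scope; the payload is hypothetical, and a real init function would stream the blob out of the puffin file on a cache miss:

async fn cached_blob<C: CacheManager>(cache: &C, file_name: &str) -> Result<C::Reader> {
    // The init function receives the cache writer on a miss and resolves to the
    // number of bytes it wrote into the cache.
    let init: Box<dyn InitBlobFn + Send> = Box::new(|mut writer: BoxWriter| {
        let fut: WriteResult = Box::pin(async move {
            futures::AsyncWriteExt::write_all(&mut writer, b"payload")
                .await
                .expect("write blob into cache");
            Ok(7)
        });
        fut
    });
    cache.get_blob(file_name, "blob-key", init).await
}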
diff --git a/src/puffin/src/puffin_manager/cached_puffin_manager.rs b/src/puffin/src/puffin_manager/cached_puffin_manager.rs
new file mode 100644
index 000000000000..984d787e4931
--- /dev/null
+++ b/src/puffin/src/puffin_manager/cached_puffin_manager.rs
@@ -0,0 +1,38 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod writer;
+
+use serde::{Deserialize, Serialize};
+pub use writer::CachedPuffinWriter;
+
+/// Metadata for a directory in the puffin file.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DirMetadata {
+ pub files: Vec<DirFileMetadata>,
+}
+
+/// Metadata for a file inside a directory in the puffin file.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DirFileMetadata {
+ /// The relative path of the file in the directory.
+ pub relative_path: String,
+
+ /// The file is stored as a blob in the puffin file.
+ /// `blob_index` is the index of the blob in the puffin file.
+ pub blob_index: usize,
+
+ /// The key of the blob in the puffin file.
+ pub key: String,
+}
diff --git a/src/puffin/src/puffin_manager/cached_puffin_manager/writer.rs b/src/puffin/src/puffin_manager/cached_puffin_manager/writer.rs
new file mode 100644
index 000000000000..cacc0bad6c5b
--- /dev/null
+++ b/src/puffin/src/puffin_manager/cached_puffin_manager/writer.rs
@@ -0,0 +1,193 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashSet;
+use std::path::PathBuf;
+
+use async_compression::futures::bufread::ZstdEncoder;
+use async_trait::async_trait;
+use futures::io::BufReader;
+use futures::{AsyncRead, AsyncSeek, AsyncWrite, StreamExt};
+use snafu::{ensure, ResultExt};
+use tokio_util::compat::TokioAsyncReadCompatExt;
+use uuid::Uuid;
+
+use crate::blob_metadata::CompressionCodec;
+use crate::error::{
+ DuplicateBlobSnafu, MetadataSnafu, OpenSnafu, Result, SerializeJsonSnafu,
+ UnsupportedCompressionSnafu, WalkDirSnafu,
+};
+use crate::file_format::writer::{Blob, PuffinAsyncWriter, PuffinFileWriter};
+use crate::puffin_manager::cache_manager::CacheManagerRef;
+use crate::puffin_manager::cached_puffin_manager::{DirFileMetadata, DirMetadata};
+use crate::puffin_manager::{PuffinWriter, PutOptions};
+
+/// `CachedPuffinWriter` is a `PuffinWriter` that writes blobs and directories to a puffin file.
+pub struct CachedPuffinWriter<CR, W> {
+ /// The name of the puffin file.
+ puffin_file_name: String,
+
+ /// The cache manager.
+ cache_manager: CacheManagerRef<CR>,
+
+ /// The underlying `PuffinFileWriter`.
+ puffin_file_writer: PuffinFileWriter<W>,
+
+ /// Written blob keys.
+ blob_keys: HashSet<String>,
+}
+
+#[async_trait]
+impl<CR, W> PuffinWriter for CachedPuffinWriter<CR, W>
+where
+ CR: AsyncRead + AsyncSeek,
+ W: AsyncWrite + Unpin + Send,
+{
+ async fn put_blob<R>(&mut self, key: &str, raw_data: R, options: PutOptions) -> Result<u64>
+ where
+ R: AsyncRead + Send,
+ {
+ ensure!(
+ !self.blob_keys.contains(key),
+ DuplicateBlobSnafu { blob: key }
+ );
+ ensure!(
+ !matches!(options.compression, Some(CompressionCodec::Lz4)),
+ UnsupportedCompressionSnafu { codec: "lz4" }
+ );
+
+ let written_bytes = match options.compression {
+ Some(CompressionCodec::Lz4) => unreachable!("checked above"),
+ Some(CompressionCodec::Zstd) => {
+ let blob = Blob {
+ blob_type: key.to_string(),
+ compressed_data: ZstdEncoder::new(BufReader::new(raw_data)),
+ compression_codec: options.compression,
+ properties: Default::default(),
+ };
+ self.puffin_file_writer.add_blob(blob).await?
+ }
+ None => {
+ let blob = Blob {
+ blob_type: key.to_string(),
+ compressed_data: raw_data,
+ compression_codec: options.compression,
+ properties: Default::default(),
+ };
+ self.puffin_file_writer.add_blob(blob).await?
+ }
+ };
+
+ self.blob_keys.insert(key.to_string());
+ Ok(written_bytes)
+ }
+
+ async fn put_dir(&mut self, key: &str, dir_path: PathBuf, options: PutOptions) -> Result<u64> {
+ ensure!(
+ !self.blob_keys.contains(key),
+ DuplicateBlobSnafu { blob: key }
+ );
+ ensure!(
+ !matches!(options.compression, Some(CompressionCodec::Lz4)),
+ UnsupportedCompressionSnafu { codec: "lz4" }
+ );
+
+ // Walk the directory and add all files to the puffin file.
+ let mut wd = async_walkdir::WalkDir::new(&dir_path).filter(|entry| async move {
+ match entry.file_type().await {
+ // Ignore directories.
+ Ok(ft) if ft.is_dir() => async_walkdir::Filtering::Ignore,
+ _ => async_walkdir::Filtering::Continue,
+ }
+ });
+
+ let mut dir_size = 0;
+ let mut written_bytes = 0;
+ let mut files = vec![];
+ while let Some(entry) = wd.next().await {
+ let entry = entry.context(WalkDirSnafu)?;
+ dir_size += entry.metadata().await.context(MetadataSnafu)?.len();
+
+ let reader = tokio::fs::File::open(entry.path())
+ .await
+ .context(OpenSnafu)?
+ .compat();
+
+ let file_key = Uuid::new_v4().to_string();
+ match options.compression {
+ Some(CompressionCodec::Lz4) => unreachable!("checked above"),
+ Some(CompressionCodec::Zstd) => {
+ let blob = Blob {
+ blob_type: file_key.clone(),
+ compressed_data: ZstdEncoder::new(BufReader::new(reader)),
+ compression_codec: options.compression,
+ properties: Default::default(),
+ };
+ written_bytes += self.puffin_file_writer.add_blob(blob).await?;
+ }
+ None => {
+ let blob = Blob {
+ blob_type: file_key.clone(),
+ compressed_data: reader,
+ compression_codec: options.compression,
+ properties: Default::default(),
+ };
+ written_bytes += self.puffin_file_writer.add_blob(blob).await?;
+ }
+ }
+
+ let relative_path = entry
+ .path()
+ .strip_prefix(&dir_path)
+ .expect("entry path is under dir path")
+ .to_string_lossy()
+ .into_owned();
+
+ files.push(DirFileMetadata {
+ relative_path,
+ key: file_key.clone(),
+ blob_index: self.blob_keys.len(),
+ });
+ self.blob_keys.insert(file_key);
+ }
+
+ let dir_metadata = DirMetadata { files };
+ let encoded = serde_json::to_vec(&dir_metadata).context(SerializeJsonSnafu)?;
+ let dir_meta_blob = Blob {
+ blob_type: key.to_string(),
+ compressed_data: encoded.as_slice(),
+ compression_codec: None,
+ properties: Default::default(),
+ };
+
+ written_bytes += self.puffin_file_writer.add_blob(dir_meta_blob).await?;
+ self.blob_keys.insert(key.to_string());
+
+ // Move the directory into the cache.
+ self.cache_manager
+ .put_dir(&self.puffin_file_name, key, dir_path, dir_size)
+ .await?;
+ Ok(written_bytes)
+ }
+
+ fn set_footer_lz4_compressed(&mut self, lz4_compressed: bool) {
+ self.puffin_file_writer
+ .set_footer_lz4_compressed(lz4_compressed);
+ }
+
+ async fn finish(mut self) -> Result<u64> {
+ let size = self.puffin_file_writer.finish().await?;
+ Ok(size)
+ }
+}
diff --git a/src/puffin/src/tests.rs b/src/puffin/src/tests.rs
index 4b10c17816a0..5698846f481d 100644
--- a/src/puffin/src/tests.rs
+++ b/src/puffin/src/tests.rs
@@ -189,18 +189,20 @@ fn test_writer_reader_sync() {
let blob1 = "abcdefghi";
writer
.add_blob(Blob {
- data: Cursor::new(&blob1),
+ compressed_data: Cursor::new(&blob1),
blob_type: "some-blob".to_string(),
properties: Default::default(),
+ compression_codec: None,
})
.unwrap();
let blob2 = include_bytes!("tests/resources/sample-metric-data.blob");
writer
.add_blob(Blob {
- data: Cursor::new(&blob2),
+ compressed_data: Cursor::new(&blob2),
blob_type: "some-other-blob".to_string(),
properties: Default::default(),
+ compression_codec: None,
})
.unwrap();
@@ -257,9 +259,10 @@ async fn test_writer_reader_async() {
let blob1 = "abcdefghi".as_bytes();
writer
.add_blob(Blob {
- data: AsyncCursor::new(blob1),
+ compressed_data: AsyncCursor::new(blob1),
blob_type: "some-blob".to_string(),
properties: Default::default(),
+ compression_codec: None,
})
.await
.unwrap();
@@ -267,9 +270,10 @@ async fn test_writer_reader_async() {
let blob2 = include_bytes!("tests/resources/sample-metric-data.blob");
writer
.add_blob(Blob {
- data: AsyncCursor::new(&blob2),
+ compressed_data: AsyncCursor::new(&blob2),
blob_type: "some-other-blob".to_string(),
properties: Default::default(),
+ compression_codec: None,
})
.await
.unwrap();
|
feat
|
implement CachedPuffinWriter (#4203)
|
65c9fbbd2f60fc8f881bcd808bd96c68d0ac30d2
|
2024-07-04 11:48:58
|
Zhenchi
|
feat(fulltext_index): integrate puffin manager with inverted index applier (#4266)
| false
|
diff --git a/config/config.md b/config/config.md
index a594e7368074..32f34304c6c0 100644
--- a/config/config.md
+++ b/config/config.md
@@ -118,12 +118,15 @@
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
+| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
+| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in the filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during index creation.<br/>- `staging`: for storing staging files used during index search. |
+| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
-| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
+| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
@@ -399,12 +402,15 @@
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
+| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
+| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in the filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during index creation.<br/>- `staging`: for storing staging files used during index search. |
+| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
-| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
+| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index b3be8b58361e..c12606110f6e 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -394,6 +394,21 @@ parallel_scan_channel_size = 32
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
+## The options for index in Mito engine.
+[region_engine.mito.index]
+
+## Auxiliary directory path for the index in the filesystem, used to store intermediate files for
+## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
+## The default name for this directory is `index_intermediate` for backward compatibility.
+##
+## This path contains two subdirectories:
+## - `__intm`: for storing intermediate files used during index creation.
+## - `staging`: for storing staging files used during index search.
+aux_path = ""
+
+## The max capacity of the staging directory.
+staging_size = "2GB"
+
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
@@ -416,7 +431,7 @@ apply_on_query = "auto"
## Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
mem_threshold_on_create = "64M"
-## File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
+## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = ""
[region_engine.mito.memtable]
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index 0a2544a77219..32c1840eeaff 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -417,6 +417,21 @@ parallel_scan_channel_size = 32
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
+## The options for index in Mito engine.
+[region_engine.mito.index]
+
+## Auxiliary directory path for the index in the filesystem, used to store intermediate files for
+## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
+## The default name for this directory is `index_intermediate` for backward compatibility.
+##
+## This path contains two subdirectories:
+## - `__intm`: for storing intermediate files used during index creation.
+## - `staging`: for storing staging files used during index search.
+aux_path = ""
+
+## The max capacity of the staging directory.
+staging_size = "2GB"
+
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
@@ -439,7 +454,7 @@ apply_on_query = "auto"
## Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
mem_threshold_on_create = "64M"
-## File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
+## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = ""
[region_engine.mito.memtable]
diff --git a/src/mito2/src/access_layer.rs b/src/mito2/src/access_layer.rs
index 40308124f5f2..98d9396bf76e 100644
--- a/src/mito2/src/access_layer.rs
+++ b/src/mito2/src/access_layer.rs
@@ -27,6 +27,7 @@ use crate::read::Source;
use crate::region::options::IndexOptions;
use crate::sst::file::{FileHandle, FileId, FileMeta};
use crate::sst::index::intermediate::IntermediateManager;
+use crate::sst::index::puffin_manager::PuffinManagerFactory;
use crate::sst::index::IndexerBuilder;
use crate::sst::location;
use crate::sst::parquet::reader::ParquetReaderBuilder;
@@ -40,6 +41,8 @@ pub struct AccessLayer {
region_dir: String,
/// Target object store.
object_store: ObjectStore,
+ /// Puffin manager factory for index.
+ puffin_manager_factory: PuffinManagerFactory,
/// Intermediate manager for inverted index.
intermediate_manager: IntermediateManager,
}
@@ -57,11 +60,13 @@ impl AccessLayer {
pub fn new(
region_dir: impl Into<String>,
object_store: ObjectStore,
+ puffin_manager_factory: PuffinManagerFactory,
intermediate_manager: IntermediateManager,
) -> AccessLayer {
AccessLayer {
region_dir: region_dir.into(),
object_store,
+ puffin_manager_factory,
intermediate_manager,
}
}
@@ -76,6 +81,11 @@ impl AccessLayer {
&self.object_store
}
+ /// Returns the puffin manager factory.
+ pub fn puffin_manager_factory(&self) -> &PuffinManagerFactory {
+ &self.puffin_manager_factory
+ }
+
/// Deletes a SST file (and its index file if it has one) with given file id.
pub(crate) async fn delete_sst(&self, file_meta: &FileMeta) -> Result<()> {
let path = location::sst_file_path(&self.region_dir, file_meta.file_id);
@@ -86,15 +96,13 @@ impl AccessLayer {
file_id: file_meta.file_id,
})?;
- if file_meta.inverted_index_available() {
- let path = location::index_file_path(&self.region_dir, file_meta.file_id);
- self.object_store
- .delete(&path)
- .await
- .context(DeleteIndexSnafu {
- file_id: file_meta.file_id,
- })?;
- }
+ let path = location::index_file_path(&self.region_dir, file_meta.file_id);
+ self.object_store
+ .delete(&path)
+ .await
+ .context(DeleteIndexSnafu {
+ file_id: file_meta.file_id,
+ })?;
Ok(())
}
diff --git a/src/mito2/src/cache/file_cache.rs b/src/mito2/src/cache/file_cache.rs
index 931e5062693a..008a71759318 100644
--- a/src/mito2/src/cache/file_cache.rs
+++ b/src/mito2/src/cache/file_cache.rs
@@ -117,6 +117,7 @@ impl FileCache {
}
/// Reads a file from the cache.
+ #[allow(unused)]
pub(crate) async fn reader(&self, key: IndexKey) -> Option<Reader> {
// We must use `get()` to update the estimator of the cache.
// See https://docs.rs/moka/latest/moka/future/struct.Cache.html#method.contains_key
diff --git a/src/mito2/src/compaction/compactor.rs b/src/mito2/src/compaction/compactor.rs
index 062e5423c4d2..a303367a344b 100644
--- a/src/mito2/src/compaction/compactor.rs
+++ b/src/mito2/src/compaction/compactor.rs
@@ -45,6 +45,7 @@ use crate::schedule::scheduler::LocalScheduler;
use crate::sst::file::{FileMeta, IndexType};
use crate::sst::file_purger::LocalFilePurger;
use crate::sst::index::intermediate::IntermediateManager;
+use crate::sst::index::puffin_manager::PuffinManagerFactory;
use crate::sst::parquet::WriteOptions;
/// CompactionRegion represents a region that needs to be compacted.
@@ -93,13 +94,19 @@ pub async fn open_compaction_region(
};
let access_layer = {
+ let puffin_manager_factory = PuffinManagerFactory::new(
+ &mito_config.index.aux_path,
+ mito_config.index.staging_size.as_bytes(),
+ Some(mito_config.index.write_buffer_size.as_bytes() as _),
+ )
+ .await?;
let intermediate_manager =
- IntermediateManager::init_fs(mito_config.inverted_index.intermediate_path.clone())
- .await?;
+ IntermediateManager::init_fs(mito_config.index.aux_path.clone()).await?;
Arc::new(AccessLayer::new(
req.region_dir.as_str(),
object_store.clone(),
+ puffin_manager_factory,
intermediate_manager,
))
};
@@ -266,7 +273,7 @@ impl Compactor for DefaultCompactor {
let index_write_buffer_size = Some(
compaction_region
.engine_config
- .inverted_index
+ .index
.write_buffer_size
.as_bytes() as usize,
);
diff --git a/src/mito2/src/config.rs b/src/mito2/src/config.rs
index 5f5799ec2f79..04d085dda8e3 100644
--- a/src/mito2/src/config.rs
+++ b/src/mito2/src/config.rs
@@ -15,6 +15,7 @@
//! Configurations.
use std::cmp;
+use std::path::Path;
use std::time::Duration;
use common_base::readable_size::ReadableSize;
@@ -104,6 +105,8 @@ pub struct MitoConfig {
/// Whether to allow stale entries read during replay.
pub allow_stale_entries: bool,
+ /// Index configs.
+ pub index: IndexConfig,
/// Inverted index configs.
pub inverted_index: InvertedIndexConfig,
@@ -134,6 +137,7 @@ impl Default for MitoConfig {
scan_parallelism: divide_num_cpus(4),
parallel_scan_channel_size: DEFAULT_SCAN_CHANNEL_SIZE,
allow_stale_entries: false,
+ index: IndexConfig::default(),
inverted_index: InvertedIndexConfig::default(),
memtable: MemtableConfig::default(),
};
@@ -202,7 +206,7 @@ impl MitoConfig {
self.experimental_write_cache_path = join_dir(data_home, "write_cache");
}
- self.inverted_index.sanitize(data_home)?;
+ self.index.sanitize(data_home, &self.inverted_index)?;
Ok(())
}
@@ -246,6 +250,70 @@ impl MitoConfig {
}
}
+#[serde_as]
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
+#[serde(default)]
+pub struct IndexConfig {
+ /// Auxiliary directory path for the index in the filesystem, used to
+ /// store intermediate files for creating the index and staging files
+ /// for searching the index, defaults to `{data_home}/index_intermediate`.
+ ///
+ /// This path contains two subdirectories:
+ /// - `__intm`: for storing intermediate files used during index creation.
+ /// - `staging`: for storing staging files used during index search.
+ ///
+ /// The default name for this directory is `index_intermediate` for backward compatibility.
+ pub aux_path: String,
+
+ /// The max capacity of the staging directory.
+ pub staging_size: ReadableSize,
+
+ /// Write buffer size for creating the index.
+ pub write_buffer_size: ReadableSize,
+}
+
+impl Default for IndexConfig {
+ fn default() -> Self {
+ Self {
+ aux_path: String::new(),
+ staging_size: ReadableSize::gb(2),
+ write_buffer_size: ReadableSize::mb(8),
+ }
+ }
+}
+
+impl IndexConfig {
+ pub fn sanitize(
+ &mut self,
+ data_home: &str,
+ inverted_index: &InvertedIndexConfig,
+ ) -> Result<()> {
+ #[allow(deprecated)]
+ if self.aux_path.is_empty() && !inverted_index.intermediate_path.is_empty() {
+ self.aux_path.clone_from(&inverted_index.intermediate_path);
+ warn!(
+ "`inverted_index.intermediate_path` is deprecated, use
+ `index.aux_path` instead. Set `index.aux_path` to {}",
+ &inverted_index.intermediate_path
+ )
+ }
+ if self.aux_path.is_empty() {
+ let path = Path::new(data_home).join("index_intermediate");
+ self.aux_path = path.as_os_str().to_string_lossy().to_string();
+ }
+
+ if self.write_buffer_size < MULTIPART_UPLOAD_MINIMUM_SIZE {
+ self.write_buffer_size = MULTIPART_UPLOAD_MINIMUM_SIZE;
+ warn!(
+ "Sanitize index write buffer size to {}",
+ self.write_buffer_size
+ );
+ }
+
+ Ok(())
+ }
+}
+
/// Operational mode for certain actions.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default)]
#[serde(rename_all = "snake_case")]
@@ -280,17 +348,23 @@ pub struct InvertedIndexConfig {
pub create_on_compaction: Mode,
/// Whether to apply the index on query: automatically or never.
pub apply_on_query: Mode,
- /// Write buffer size for creating the index.
- pub write_buffer_size: ReadableSize,
+
/// Memory threshold for performing an external sort during index creation.
/// `None` means all sorting will happen in memory.
#[serde_as(as = "NoneAsEmptyString")]
pub mem_threshold_on_create: Option<ReadableSize>,
- /// File system path to store intermediate files for external sort, defaults to `{data_home}/index_intermediate`.
+
+ #[deprecated = "use [IndexConfig::aux_path] instead"]
+ #[serde(skip_serializing)]
pub intermediate_path: String,
+
+ #[deprecated = "use [IndexConfig::write_buffer_size] instead"]
+ #[serde(skip_serializing)]
+ pub write_buffer_size: ReadableSize,
}
impl Default for InvertedIndexConfig {
+ #[allow(deprecated)]
fn default() -> Self {
Self {
create_on_flush: Mode::Auto,
@@ -303,24 +377,6 @@ impl Default for InvertedIndexConfig {
}
}
-impl InvertedIndexConfig {
- pub fn sanitize(&mut self, data_home: &str) -> Result<()> {
- if self.intermediate_path.is_empty() {
- self.intermediate_path = join_dir(data_home, "index_intermediate");
- }
-
- if self.write_buffer_size < MULTIPART_UPLOAD_MINIMUM_SIZE {
- self.write_buffer_size = MULTIPART_UPLOAD_MINIMUM_SIZE;
- warn!(
- "Sanitize index write buffer size to {}",
- self.write_buffer_size
- );
- }
-
- Ok(())
- }
-}
-
/// Divide cpu num by a non-zero `divisor` and returns at least 1.
fn divide_num_cpus(divisor: usize) -> usize {
debug_assert!(divisor > 0);
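A minimal sketch of the migration behavior implemented above, assuming the `IndexConfig` and `InvertedIndexConfig` structs from this hunk; the data home path is illustrative and the snippet is not part of the change itself:

    // Legacy configs that still set `inverted_index.intermediate_path` keep working:
    // `IndexConfig::sanitize` copies the deprecated value into `index.aux_path`.
    #[allow(deprecated)]
    fn sanitize_example() -> Result<()> {
        let mut inverted_index = InvertedIndexConfig::default();
        inverted_index.intermediate_path = "/data/greptimedb/index_intermediate".to_string();

        let mut index = IndexConfig::default();
        index.sanitize("/data/greptimedb", &inverted_index)?;
        assert_eq!(index.aux_path, "/data/greptimedb/index_intermediate");

        // Without the legacy value, `aux_path` falls back to `{data_home}/index_intermediate`.
        let mut index = IndexConfig::default();
        index.sanitize("/data/greptimedb", &InvertedIndexConfig::default())?;
        assert_eq!(index.aux_path, "/data/greptimedb/index_intermediate");
        Ok(())
    }
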
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index 1306edf09de7..ed665e445c67 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -597,13 +597,6 @@ pub enum Error {
location: Location,
},
- #[snafu(display("Blob type not found, blob_type: {blob_type}"))]
- PuffinBlobTypeNotFound {
- blob_type: String,
- #[snafu(implicit)]
- location: Location,
- },
-
#[snafu(display("Failed to write puffin completely"))]
PuffinFinish {
source: puffin::error::Error,
@@ -783,6 +776,20 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+
+ #[snafu(display("Failed to initialize puffin stager"))]
+ PuffinInitStager {
+ source: puffin::error::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Failed to build puffin reader"))]
+ PuffinBuildReader {
+ source: puffin::error::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -821,7 +828,6 @@ impl ErrorExt for Error {
| CreateDefault { .. }
| InvalidParquet { .. }
| OperateAbortedIndex { .. }
- | PuffinBlobTypeNotFound { .. }
| UnexpectedReplay { .. }
| IndexEncodeNull { .. } => StatusCode::Unexpected,
RegionNotFound { .. } => StatusCode::RegionNotFound,
@@ -886,7 +892,9 @@ impl ErrorExt for Error {
PuffinReadMetadata { source, .. }
| PuffinReadBlob { source, .. }
| PuffinFinish { source, .. }
- | PuffinAddBlob { source, .. } => source.status_code(),
+ | PuffinAddBlob { source, .. }
+ | PuffinInitStager { source, .. }
+ | PuffinBuildReader { source, .. } => source.status_code(),
CleanDir { .. } => StatusCode::Unexpected,
InvalidConfig { .. } => StatusCode::InvalidArguments,
StaleLogEntry { .. } => StatusCode::Unexpected,
diff --git a/src/mito2/src/flush.rs b/src/mito2/src/flush.rs
index 971295e08d32..2d573b423b5c 100644
--- a/src/mito2/src/flush.rs
+++ b/src/mito2/src/flush.rs
@@ -327,12 +327,8 @@ impl RegionFlushTask {
.inverted_index
.mem_threshold_on_create
.map(|m| m.as_bytes() as _);
- let index_write_buffer_size = Some(
- self.engine_config
- .inverted_index
- .write_buffer_size
- .as_bytes() as usize,
- );
+ let index_write_buffer_size =
+ Some(self.engine_config.index.write_buffer_size.as_bytes() as usize);
// Flush to level 0.
let write_request = SstWriteRequest {
diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs
index e29b1611a2f7..c25a040295ac 100644
--- a/src/mito2/src/read/scan_region.rs
+++ b/src/mito2/src/read/scan_region.rs
@@ -343,6 +343,7 @@ impl ScanRegion {
.iter()
.copied()
.collect(),
+ self.access_layer.puffin_manager_factory().clone(),
)
.build(&self.request.filters)
.inspect_err(|err| warn!(err; "Failed to build index applier"))
diff --git a/src/mito2/src/region/opener.rs b/src/mito2/src/region/opener.rs
index 50aa7c68cd37..65429478f575 100644
--- a/src/mito2/src/region/opener.rs
+++ b/src/mito2/src/region/opener.rs
@@ -48,6 +48,7 @@ use crate::request::OptionOutputTx;
use crate::schedule::scheduler::SchedulerRef;
use crate::sst::file_purger::LocalFilePurger;
use crate::sst::index::intermediate::IntermediateManager;
+use crate::sst::index::puffin_manager::PuffinManagerFactory;
use crate::time_provider::{StdTimeProvider, TimeProviderRef};
use crate::wal::entry_reader::WalEntryReader;
use crate::wal::{EntryId, Wal};
@@ -63,6 +64,7 @@ pub(crate) struct RegionOpener {
options: Option<RegionOptions>,
cache_manager: Option<CacheManagerRef>,
skip_wal_replay: bool,
+ puffin_manager_factory: PuffinManagerFactory,
intermediate_manager: IntermediateManager,
time_provider: Option<TimeProviderRef>,
stats: ManifestStats,
@@ -77,6 +79,7 @@ impl RegionOpener {
memtable_builder_provider: MemtableBuilderProvider,
object_store_manager: ObjectStoreManagerRef,
purge_scheduler: SchedulerRef,
+ puffin_manager_factory: PuffinManagerFactory,
intermediate_manager: IntermediateManager,
) -> RegionOpener {
RegionOpener {
@@ -89,6 +92,7 @@ impl RegionOpener {
options: None,
cache_manager: None,
skip_wal_replay: false,
+ puffin_manager_factory,
intermediate_manager,
time_provider: None,
stats: Default::default(),
@@ -216,6 +220,7 @@ impl RegionOpener {
let access_layer = Arc::new(AccessLayer::new(
self.region_dir,
object_store,
+ self.puffin_manager_factory,
self.intermediate_manager,
));
let time_provider = self
@@ -317,6 +322,7 @@ impl RegionOpener {
let access_layer = Arc::new(AccessLayer::new(
self.region_dir.clone(),
object_store,
+ self.puffin_manager_factory.clone(),
self.intermediate_manager.clone(),
));
let file_purger = Arc::new(LocalFilePurger::new(
diff --git a/src/mito2/src/sst/file_purger.rs b/src/mito2/src/sst/file_purger.rs
index 4f8117093320..0753b1a3eb76 100644
--- a/src/mito2/src/sst/file_purger.rs
+++ b/src/mito2/src/sst/file_purger.rs
@@ -97,7 +97,6 @@ impl FilePurger for LocalFilePurger {
mod tests {
use common_test_util::temp_dir::create_temp_dir;
use object_store::services::Fs;
- use object_store::util::join_dir;
use object_store::ObjectStore;
use smallvec::SmallVec;
@@ -106,6 +105,7 @@ mod tests {
use crate::schedule::scheduler::{LocalScheduler, Scheduler};
use crate::sst::file::{FileHandle, FileId, FileMeta, FileTimeRange, IndexType};
use crate::sst::index::intermediate::IntermediateManager;
+ use crate::sst::index::puffin_manager::PuffinManagerFactory;
use crate::sst::location;
#[tokio::test]
@@ -119,7 +119,12 @@ mod tests {
let sst_file_id = FileId::random();
let sst_dir = "table1";
let path = location::sst_file_path(sst_dir, sst_file_id);
- let intm_mgr = IntermediateManager::init_fs(join_dir(&dir_path, "intm"))
+
+ let index_aux_path = dir.path().join("index_aux");
+ let puffin_mgr = PuffinManagerFactory::new(&index_aux_path, 4096, None)
+ .await
+ .unwrap();
+ let intm_mgr = IntermediateManager::init_fs(index_aux_path.to_str().unwrap())
.await
.unwrap();
@@ -127,7 +132,12 @@ mod tests {
object_store.write(&path, vec![0; 4096]).await.unwrap();
let scheduler = Arc::new(LocalScheduler::new(3));
- let layer = Arc::new(AccessLayer::new(sst_dir, object_store.clone(), intm_mgr));
+ let layer = Arc::new(AccessLayer::new(
+ sst_dir,
+ object_store.clone(),
+ puffin_mgr,
+ intm_mgr,
+ ));
let file_purger = Arc::new(LocalFilePurger::new(scheduler.clone(), layer, None));
@@ -165,11 +175,16 @@ mod tests {
builder.root(&dir_path);
let sst_file_id = FileId::random();
let sst_dir = "table1";
- let intm_mgr = IntermediateManager::init_fs(join_dir(&dir_path, "intm"))
+
+ let index_aux_path = dir.path().join("index_aux");
+ let puffin_mgr = PuffinManagerFactory::new(&index_aux_path, 4096, None)
+ .await
+ .unwrap();
+ let intm_mgr = IntermediateManager::init_fs(index_aux_path.to_str().unwrap())
.await
.unwrap();
- let path = location::sst_file_path(sst_dir, sst_file_id);
+ let path = location::sst_file_path(sst_dir, sst_file_id);
let object_store = ObjectStore::new(builder).unwrap().finish();
object_store.write(&path, vec![0; 4096]).await.unwrap();
@@ -180,7 +195,12 @@ mod tests {
.unwrap();
let scheduler = Arc::new(LocalScheduler::new(3));
- let layer = Arc::new(AccessLayer::new(sst_dir, object_store.clone(), intm_mgr));
+ let layer = Arc::new(AccessLayer::new(
+ sst_dir,
+ object_store.clone(),
+ puffin_mgr,
+ intm_mgr,
+ ));
let file_purger = Arc::new(LocalFilePurger::new(scheduler.clone(), layer, None));
diff --git a/src/mito2/src/sst/index.rs b/src/mito2/src/sst/index.rs
index ebc561c82973..5bfee47ef765 100644
--- a/src/mito2/src/sst/index.rs
+++ b/src/mito2/src/sst/index.rs
@@ -16,6 +16,7 @@ pub(crate) mod applier;
mod codec;
pub(crate) mod creator;
pub(crate) mod intermediate;
+pub(crate) mod puffin_manager;
mod store;
use std::num::NonZeroUsize;
diff --git a/src/mito2/src/sst/index/applier.rs b/src/mito2/src/sst/index/applier.rs
index a823de56c891..d99d5ea8cdfe 100644
--- a/src/mito2/src/sst/index/applier.rs
+++ b/src/mito2/src/sst/index/applier.rs
@@ -16,27 +16,21 @@ pub mod builder;
use std::sync::Arc;
-use futures::{AsyncRead, AsyncSeek};
+use common_telemetry::warn;
use index::inverted_index::format::reader::InvertedIndexBlobReader;
use index::inverted_index::search::index_apply::{
ApplyOutput, IndexApplier, IndexNotFoundStrategy, SearchContext,
};
use object_store::ObjectStore;
-use puffin::file_format::reader::{AsyncReader, PuffinFileReader};
-use snafu::{OptionExt, ResultExt};
+use puffin::puffin_manager::{BlobGuard, PuffinManager, PuffinReader};
+use snafu::ResultExt;
use store_api::storage::RegionId;
use crate::cache::file_cache::{FileCacheRef, FileType, IndexKey};
-use crate::error::{
- ApplyIndexSnafu, OpenDalSnafu, PuffinBlobTypeNotFoundSnafu, PuffinReadBlobSnafu,
- PuffinReadMetadataSnafu, Result,
-};
-use crate::metrics::{
- INDEX_APPLY_ELAPSED, INDEX_APPLY_MEMORY_USAGE, INDEX_PUFFIN_READ_BYTES_TOTAL,
- INDEX_PUFFIN_READ_OP_TOTAL, INDEX_PUFFIN_SEEK_OP_TOTAL,
-};
+use crate::error::{ApplyIndexSnafu, PuffinBuildReaderSnafu, PuffinReadBlobSnafu, Result};
+use crate::metrics::{INDEX_APPLY_ELAPSED, INDEX_APPLY_MEMORY_USAGE};
use crate::sst::file::FileId;
-use crate::sst::index::store::InstrumentedStore;
+use crate::sst::index::puffin_manager::{BlobReader, PuffinManagerFactory};
use crate::sst::index::INDEX_BLOB_TYPE;
use crate::sst::location;
@@ -50,7 +44,7 @@ pub(crate) struct SstIndexApplier {
region_id: RegionId,
/// Store responsible for accessing remote index files.
- store: InstrumentedStore,
+ store: ObjectStore,
/// The cache of index files.
file_cache: Option<FileCacheRef>,
@@ -58,6 +52,9 @@ pub(crate) struct SstIndexApplier {
/// Predefined index applier used to apply predicates to index files
/// and return the relevant row group ids for further scan.
index_applier: Box<dyn IndexApplier>,
+
+ /// The puffin manager factory.
+ puffin_manager_factory: PuffinManagerFactory,
}
pub(crate) type SstIndexApplierRef = Arc<SstIndexApplier>;
@@ -67,18 +64,20 @@ impl SstIndexApplier {
pub fn new(
region_dir: String,
region_id: RegionId,
- object_store: ObjectStore,
+ store: ObjectStore,
file_cache: Option<FileCacheRef>,
index_applier: Box<dyn IndexApplier>,
+ puffin_manager_factory: PuffinManagerFactory,
) -> Self {
INDEX_APPLY_MEMORY_USAGE.add(index_applier.memory_usage() as i64);
Self {
region_dir,
region_id,
- store: InstrumentedStore::new(object_store),
+ store,
file_cache,
index_applier,
+ puffin_manager_factory,
}
}
@@ -91,94 +90,65 @@ impl SstIndexApplier {
index_not_found_strategy: IndexNotFoundStrategy::ReturnEmpty,
};
- match self.cached_puffin_reader(file_id).await? {
- Some(mut puffin_reader) => {
- let blob_reader = Self::index_blob_reader(&mut puffin_reader).await?;
- let mut index_reader = InvertedIndexBlobReader::new(blob_reader);
- self.index_applier
- .apply(context, &mut index_reader)
- .await
- .context(ApplyIndexSnafu)
+ let blob = match self.cached_blob_reader(file_id).await {
+ Ok(Some(puffin_reader)) => puffin_reader,
+ other => {
+ if let Err(err) = other {
+ warn!(err; "An unexpected error occurred while reading the cached index file. Fallback to remote index file.")
+ }
+ self.remote_blob_reader(file_id).await?
}
- None => {
- let mut puffin_reader = self.remote_puffin_reader(file_id).await?;
- let blob_reader = Self::index_blob_reader(&mut puffin_reader).await?;
- let mut index_reader = InvertedIndexBlobReader::new(blob_reader);
- self.index_applier
- .apply(context, &mut index_reader)
- .await
- .context(ApplyIndexSnafu)
- }
- }
+ };
+ let mut blob_reader = InvertedIndexBlobReader::new(blob);
+ let output = self
+ .index_applier
+ .apply(context, &mut blob_reader)
+ .await
+ .context(ApplyIndexSnafu)?;
+ Ok(output)
}
- /// Helper function to create a [`PuffinFileReader`] from the cached index file.
- async fn cached_puffin_reader(
- &self,
- file_id: FileId,
- ) -> Result<Option<PuffinFileReader<impl AsyncRead + AsyncSeek>>> {
+ /// Creates a blob reader from the cached index file.
+ async fn cached_blob_reader(&self, file_id: FileId) -> Result<Option<BlobReader>> {
let Some(file_cache) = &self.file_cache else {
return Ok(None);
};
- let Some(indexed_value) = file_cache
- .get(IndexKey::new(self.region_id, file_id, FileType::Puffin))
- .await
- else {
+ let index_key = IndexKey::new(self.region_id, file_id, FileType::Puffin);
+ if file_cache.get(index_key).await.is_none() {
return Ok(None);
};
- let Some(reader) = file_cache
- .reader(IndexKey::new(self.region_id, file_id, FileType::Puffin))
- .await
- else {
- return Ok(None);
- };
+ let puffin_manager = self.puffin_manager_factory.build(file_cache.local_store());
+ let puffin_file_name = file_cache.cache_file_path(index_key);
- let reader = reader
- .into_futures_async_read(0..indexed_value.file_size as u64)
+ let reader = puffin_manager
+ .reader(&puffin_file_name)
.await
- .context(OpenDalSnafu)?;
-
- Ok(Some(PuffinFileReader::new(reader)))
+ .context(PuffinBuildReaderSnafu)?
+ .blob(INDEX_BLOB_TYPE)
+ .await
+ .context(PuffinReadBlobSnafu)?
+ .reader()
+ .await
+ .context(PuffinBuildReaderSnafu)?;
+ Ok(Some(reader))
}
- /// Helper function to create a [`PuffinFileReader`] from the remote index file.
- async fn remote_puffin_reader(
- &self,
- file_id: FileId,
- ) -> Result<PuffinFileReader<impl AsyncRead + AsyncSeek>> {
+ /// Creates a blob reader from the remote index file.
+ async fn remote_blob_reader(&self, file_id: FileId) -> Result<BlobReader> {
+ let puffin_manager = self.puffin_manager_factory.build(self.store.clone());
let file_path = location::index_file_path(&self.region_dir, file_id);
- let file_reader = self
- .store
- .reader(
- &file_path,
- &INDEX_PUFFIN_READ_BYTES_TOTAL,
- &INDEX_PUFFIN_READ_OP_TOTAL,
- &INDEX_PUFFIN_SEEK_OP_TOTAL,
- )
- .await?;
- Ok(PuffinFileReader::new(file_reader))
- }
-
- /// Helper function to create a [`PuffinBlobReader`] for the index blob of the provided index file reader.
- async fn index_blob_reader(
- puffin_reader: &mut PuffinFileReader<impl AsyncRead + AsyncSeek + Unpin + Send>,
- ) -> Result<impl AsyncRead + AsyncSeek + '_> {
- let file_meta = puffin_reader
- .metadata()
+ puffin_manager
+ .reader(&file_path)
+ .await
+ .context(PuffinBuildReaderSnafu)?
+ .blob(INDEX_BLOB_TYPE)
.await
- .context(PuffinReadMetadataSnafu)?;
- let blob_meta = file_meta
- .blobs
- .iter()
- .find(|blob| blob.blob_type == INDEX_BLOB_TYPE)
- .context(PuffinBlobTypeNotFoundSnafu {
- blob_type: INDEX_BLOB_TYPE,
- })?;
- puffin_reader
- .blob_reader(blob_meta)
- .context(PuffinReadBlobSnafu)
+ .context(PuffinReadBlobSnafu)?
+ .reader()
+ .await
+ .context(PuffinBuildReaderSnafu)
}
}
@@ -194,35 +164,26 @@ mod tests {
use futures::io::Cursor;
use index::inverted_index::search::index_apply::MockIndexApplier;
use object_store::services::Memory;
- use puffin::file_format::writer::{AsyncWriter, Blob, PuffinFileWriter};
+ use puffin::puffin_manager::PuffinWriter;
use super::*;
- use crate::error::Error;
#[tokio::test]
async fn test_index_applier_apply_basic() {
+ let (_d, puffin_manager_factory) =
+ PuffinManagerFactory::new_for_test_async("test_index_applier_apply_basic_").await;
let object_store = ObjectStore::new(Memory::default()).unwrap().finish();
let file_id = FileId::random();
let region_dir = "region_dir".to_string();
let path = location::index_file_path(®ion_dir, file_id);
- let mut puffin_writer = PuffinFileWriter::new(
- object_store
- .writer(&path)
- .await
- .unwrap()
- .into_futures_async_write(),
- );
- puffin_writer
- .add_blob(Blob {
- blob_type: INDEX_BLOB_TYPE.to_string(),
- compressed_data: Cursor::new(vec![]),
- properties: Default::default(),
- compression_codec: None,
- })
+ let puffin_manager = puffin_manager_factory.build(object_store.clone());
+ let mut writer = puffin_manager.writer(&path).await.unwrap();
+ writer
+ .put_blob(INDEX_BLOB_TYPE, Cursor::new(vec![]), Default::default())
.await
.unwrap();
- puffin_writer.finish().await.unwrap();
+ writer.finish().await.unwrap();
let mut mock_index_applier = MockIndexApplier::new();
mock_index_applier.expect_memory_usage().returning(|| 100);
@@ -240,6 +201,7 @@ mod tests {
object_store,
None,
Box::new(mock_index_applier),
+ puffin_manager_factory,
);
let output = sst_index_applier.apply(file_id).await.unwrap();
assert_eq!(
@@ -254,28 +216,21 @@ mod tests {
#[tokio::test]
async fn test_index_applier_apply_invalid_blob_type() {
+ let (_d, puffin_manager_factory) =
+ PuffinManagerFactory::new_for_test_async("test_index_applier_apply_invalid_blob_type_")
+ .await;
let object_store = ObjectStore::new(Memory::default()).unwrap().finish();
let file_id = FileId::random();
let region_dir = "region_dir".to_string();
let path = location::index_file_path(®ion_dir, file_id);
- let mut puffin_writer = PuffinFileWriter::new(
- object_store
- .writer(&path)
- .await
- .unwrap()
- .into_futures_async_write(),
- );
- puffin_writer
- .add_blob(Blob {
- blob_type: "invalid_blob_type".to_string(),
- compressed_data: Cursor::new(vec![]),
- properties: Default::default(),
- compression_codec: None,
- })
+ let puffin_manager = puffin_manager_factory.build(object_store.clone());
+ let mut writer = puffin_manager.writer(&path).await.unwrap();
+ writer
+ .put_blob("invalid_blob_type", Cursor::new(vec![]), Default::default())
.await
.unwrap();
- puffin_writer.finish().await.unwrap();
+ writer.finish().await.unwrap();
let mut mock_index_applier = MockIndexApplier::new();
mock_index_applier.expect_memory_usage().returning(|| 100);
@@ -287,8 +242,9 @@ mod tests {
object_store,
None,
Box::new(mock_index_applier),
+ puffin_manager_factory,
);
let res = sst_index_applier.apply(file_id).await;
- assert!(matches!(res, Err(Error::PuffinBlobTypeNotFound { .. })));
+ assert!(format!("{:?}", res.unwrap_err()).contains("Blob not found"));
}
}
diff --git a/src/mito2/src/sst/index/applier/builder.rs b/src/mito2/src/sst/index/applier/builder.rs
index c414e91deb48..1a4c1735ab95 100644
--- a/src/mito2/src/sst/index/applier/builder.rs
+++ b/src/mito2/src/sst/index/applier/builder.rs
@@ -38,6 +38,7 @@ use crate::error::{BuildIndexApplierSnafu, ColumnNotFoundSnafu, ConvertValueSnaf
use crate::row_converter::SortField;
use crate::sst::index::applier::SstIndexApplier;
use crate::sst::index::codec::IndexValueCodec;
+use crate::sst::index::puffin_manager::PuffinManagerFactory;
/// Constructs an [`SstIndexApplier`] which applies predicates to SST files during scan.
pub(crate) struct SstIndexApplierBuilder<'a> {
@@ -58,6 +59,9 @@ pub(crate) struct SstIndexApplierBuilder<'a> {
/// Stores predicates during traversal on the Expr tree.
output: HashMap<ColumnId, Vec<Predicate>>,
+
+ /// The puffin manager factory.
+ puffin_manager_factory: PuffinManagerFactory,
}
impl<'a> SstIndexApplierBuilder<'a> {
@@ -68,6 +72,7 @@ impl<'a> SstIndexApplierBuilder<'a> {
file_cache: Option<FileCacheRef>,
metadata: &'a RegionMetadata,
ignore_column_ids: HashSet<ColumnId>,
+ puffin_manager_factory: PuffinManagerFactory,
) -> Self {
Self {
region_dir,
@@ -76,6 +81,7 @@ impl<'a> SstIndexApplierBuilder<'a> {
metadata,
ignore_column_ids,
output: HashMap::default(),
+ puffin_manager_factory,
}
}
@@ -102,6 +108,7 @@ impl<'a> SstIndexApplierBuilder<'a> {
self.object_store,
self.file_cache,
Box::new(applier.context(BuildIndexApplierSnafu)?),
+ self.puffin_manager_factory,
)))
}
@@ -306,6 +313,8 @@ mod tests {
#[test]
fn test_collect_and_basic() {
+ let (_d, facotry) = PuffinManagerFactory::new_for_test_block("test_collect_and_basic_");
+
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -313,6 +322,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let expr = Expr::BinaryExpr(BinaryExpr {
diff --git a/src/mito2/src/sst/index/applier/builder/between.rs b/src/mito2/src/sst/index/applier/builder/between.rs
index 9f761328f350..00740c852119 100644
--- a/src/mito2/src/sst/index/applier/builder/between.rs
+++ b/src/mito2/src/sst/index/applier/builder/between.rs
@@ -66,9 +66,11 @@ mod tests {
encoded_string, field_column, int64_lit, nonexistent_column, string_lit, tag_column,
test_object_store, test_region_metadata,
};
+ use crate::sst::index::puffin_manager::PuffinManagerFactory;
#[test]
fn test_collect_between_basic() {
+ let (_d, facotry) = PuffinManagerFactory::new_for_test_block("test_collect_between_basic_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -76,6 +78,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let between = Between {
@@ -108,6 +111,8 @@ mod tests {
#[test]
fn test_collect_between_negated() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_between_negated_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -115,6 +120,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let between = Between {
@@ -130,6 +136,8 @@ mod tests {
#[test]
fn test_collect_between_field_column() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_between_field_column_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -137,6 +145,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let between = Between {
@@ -152,6 +161,8 @@ mod tests {
#[test]
fn test_collect_between_type_mismatch() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_between_type_mismatch_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -159,6 +170,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let between = Between {
@@ -175,6 +187,8 @@ mod tests {
#[test]
fn test_collect_between_nonexistent_column() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_between_nonexistent_column_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -182,6 +196,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let between = Between {
diff --git a/src/mito2/src/sst/index/applier/builder/comparison.rs b/src/mito2/src/sst/index/applier/builder/comparison.rs
index 4914a7578cb5..74a67aac6ff8 100644
--- a/src/mito2/src/sst/index/applier/builder/comparison.rs
+++ b/src/mito2/src/sst/index/applier/builder/comparison.rs
@@ -138,6 +138,7 @@ mod tests {
encoded_string, field_column, int64_lit, nonexistent_column, string_lit, tag_column,
test_object_store, test_region_metadata,
};
+ use crate::sst::index::puffin_manager::PuffinManagerFactory;
#[test]
fn test_collect_comparison_basic() {
@@ -224,6 +225,8 @@ mod tests {
),
];
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_comparison_basic_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -231,6 +234,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
for ((left, op, right), _) in &cases {
@@ -249,6 +253,8 @@ mod tests {
#[test]
fn test_collect_comparison_type_mismatch() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_comparison_type_mismatch_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -256,6 +262,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let res = builder.collect_comparison_expr(&tag_column(), &Operator::Lt, &int64_lit(10));
@@ -265,6 +272,8 @@ mod tests {
#[test]
fn test_collect_comparison_field_column() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_comparison_field_column_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -272,6 +281,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
builder
@@ -282,6 +292,8 @@ mod tests {
#[test]
fn test_collect_comparison_nonexistent_column() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_comparison_nonexistent_column_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -289,6 +301,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let res = builder.collect_comparison_expr(
diff --git a/src/mito2/src/sst/index/applier/builder/eq_list.rs b/src/mito2/src/sst/index/applier/builder/eq_list.rs
index 23a4d7516da3..a01f77d41392 100644
--- a/src/mito2/src/sst/index/applier/builder/eq_list.rs
+++ b/src/mito2/src/sst/index/applier/builder/eq_list.rs
@@ -128,9 +128,11 @@ mod tests {
encoded_string, field_column, int64_lit, nonexistent_column, string_lit, tag_column,
tag_column2, test_object_store, test_region_metadata,
};
+ use crate::sst::index::puffin_manager::PuffinManagerFactory;
#[test]
fn test_collect_eq_basic() {
+ let (_d, facotry) = PuffinManagerFactory::new_for_test_block("test_collect_eq_basic_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -138,6 +140,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
builder
@@ -165,6 +168,8 @@ mod tests {
#[test]
fn test_collect_eq_field_column() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_eq_field_column_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -172,6 +177,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
builder
@@ -182,6 +188,8 @@ mod tests {
#[test]
fn test_collect_eq_nonexistent_column() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_eq_nonexistent_column_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -189,6 +197,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let res = builder.collect_eq(&nonexistent_column(), &string_lit("abc"));
@@ -198,6 +207,8 @@ mod tests {
#[test]
fn test_collect_eq_type_mismatch() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_eq_type_mismatch_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -205,6 +216,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let res = builder.collect_eq(&tag_column(), &int64_lit(1));
@@ -214,6 +226,8 @@ mod tests {
#[test]
fn test_collect_or_eq_list_basic() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_or_eq_list_basic_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -221,6 +235,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let eq_expr = DfExpr::BinaryExpr(BinaryExpr {
@@ -269,6 +284,8 @@ mod tests {
#[test]
fn test_collect_or_eq_list_invalid_op() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_or_eq_list_invalid_op_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -276,6 +293,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let eq_expr = DfExpr::BinaryExpr(BinaryExpr {
@@ -303,6 +321,8 @@ mod tests {
#[test]
fn test_collect_or_eq_list_multiple_columns() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_or_eq_list_multiple_columns_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -310,6 +330,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let eq_expr = DfExpr::BinaryExpr(BinaryExpr {
diff --git a/src/mito2/src/sst/index/applier/builder/in_list.rs b/src/mito2/src/sst/index/applier/builder/in_list.rs
index ead08943fa39..c9e00685309d 100644
--- a/src/mito2/src/sst/index/applier/builder/in_list.rs
+++ b/src/mito2/src/sst/index/applier/builder/in_list.rs
@@ -59,9 +59,11 @@ mod tests {
encoded_string, field_column, int64_lit, nonexistent_column, string_lit, tag_column,
test_object_store, test_region_metadata,
};
+ use crate::sst::index::puffin_manager::PuffinManagerFactory;
#[test]
fn test_collect_in_list_basic() {
+ let (_d, facotry) = PuffinManagerFactory::new_for_test_block("test_collect_in_list_basic_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -69,6 +71,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let in_list = InList {
@@ -91,6 +94,8 @@ mod tests {
#[test]
fn test_collect_in_list_negated() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_in_list_negated_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -98,6 +103,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let in_list = InList {
@@ -112,6 +118,8 @@ mod tests {
#[test]
fn test_collect_in_list_field_column() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_in_list_field_column_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -119,6 +127,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let in_list = InList {
@@ -133,6 +142,8 @@ mod tests {
#[test]
fn test_collect_in_list_type_mismatch() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_in_list_type_mismatch_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -140,6 +151,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let in_list = InList {
@@ -155,6 +167,9 @@ mod tests {
#[test]
fn test_collect_in_list_nonexistent_column() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_collect_in_list_nonexistent_column_");
+
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -162,6 +177,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let in_list = InList {
diff --git a/src/mito2/src/sst/index/applier/builder/regex_match.rs b/src/mito2/src/sst/index/applier/builder/regex_match.rs
index b318fd6308e8..f341a03a6988 100644
--- a/src/mito2/src/sst/index/applier/builder/regex_match.rs
+++ b/src/mito2/src/sst/index/applier/builder/regex_match.rs
@@ -53,9 +53,11 @@ mod tests {
field_column, int64_lit, nonexistent_column, string_lit, tag_column, test_object_store,
test_region_metadata,
};
+ use crate::sst::index::puffin_manager::PuffinManagerFactory;
#[test]
fn test_regex_match_basic() {
+ let (_d, facotry) = PuffinManagerFactory::new_for_test_block("test_regex_match_basic_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -63,6 +65,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
builder
@@ -81,6 +84,8 @@ mod tests {
#[test]
fn test_regex_match_field_column() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_regex_match_field_column_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -88,6 +93,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
builder
@@ -99,6 +105,8 @@ mod tests {
#[test]
fn test_regex_match_type_mismatch() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_regex_match_type_mismatch_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -106,6 +114,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
builder
@@ -117,6 +126,8 @@ mod tests {
#[test]
fn test_regex_match_type_nonexist_column() {
+ let (_d, facotry) =
+ PuffinManagerFactory::new_for_test_block("test_regex_match_type_nonexist_column_");
let metadata = test_region_metadata();
let mut builder = SstIndexApplierBuilder::new(
"test".to_string(),
@@ -124,6 +135,7 @@ mod tests {
None,
&metadata,
HashSet::default(),
+ facotry,
);
let res = builder.collect_regex_match(&nonexistent_column(), &string_lit("abc"));
diff --git a/src/mito2/src/sst/index/creator.rs b/src/mito2/src/sst/index/creator.rs
index 548f1f9349d2..a2553baa236b 100644
--- a/src/mito2/src/sst/index/creator.rs
+++ b/src/mito2/src/sst/index/creator.rs
@@ -332,6 +332,7 @@ mod tests {
use super::*;
use crate::row_converter::{McmpRowCodec, RowCodec, SortField};
use crate::sst::index::applier::builder::SstIndexApplierBuilder;
+ use crate::sst::index::puffin_manager::PuffinManagerFactory;
use crate::sst::location;
fn mock_object_store() -> ObjectStore {
@@ -403,8 +404,10 @@ mod tests {
}
async fn build_applier_factory(
+ prefix: &str,
tags: BTreeSet<(&'static str, i32)>,
) -> impl Fn(DfExpr) -> BoxFuture<'static, Vec<usize>> {
+ let (d, factory) = PuffinManagerFactory::new_for_test_async(prefix).await;
let region_dir = "region0".to_string();
let sst_file_id = FileId::random();
let file_path = location::index_file_path(®ion_dir, sst_file_id);
@@ -433,12 +436,14 @@ mod tests {
assert_eq!(row_count, tags.len() * segment_row_count);
move |expr| {
+ let _d = &d;
let applier = SstIndexApplierBuilder::new(
region_dir.clone(),
object_store.clone(),
None,
®ion_metadata,
Default::default(),
+ factory.clone(),
)
.build(&[expr])
.unwrap()
@@ -469,7 +474,7 @@ mod tests {
("abc", 3),
]);
- let applier_factory = build_applier_factory(tags).await;
+ let applier_factory = build_applier_factory("test_create_and_query_get_key_", tags).await;
let expr = col("tag_str").eq(lit("aaa"));
let res = applier_factory(expr).await;
@@ -508,7 +513,7 @@ mod tests {
("abc", 3),
]);
- let applier_factory = build_applier_factory(tags).await;
+ let applier_factory = build_applier_factory("test_create_and_query_range_", tags).await;
let expr = col("tag_str").between(lit("aaa"), lit("aab"));
let res = applier_factory(expr).await;
@@ -541,7 +546,8 @@ mod tests {
("abc", 3),
]);
- let applier_factory = build_applier_factory(tags).await;
+ let applier_factory =
+ build_applier_factory("test_create_and_query_comparison_", tags).await;
let expr = col("tag_str").lt(lit("aab"));
let res = applier_factory(expr).await;
@@ -600,7 +606,7 @@ mod tests {
("abc", 3),
]);
- let applier_factory = build_applier_factory(tags).await;
+ let applier_factory = build_applier_factory("test_create_and_query_regex_", tags).await;
let expr = binary_expr(col("tag_str"), Operator::RegexMatch, lit(".*"));
let res = applier_factory(expr).await;
diff --git a/src/mito2/src/sst/index/creator/statistics.rs b/src/mito2/src/sst/index/creator/statistics.rs
index 60cabe44e8d2..bcf6569d4809 100644
--- a/src/mito2/src/sst/index/creator/statistics.rs
+++ b/src/mito2/src/sst/index/creator/statistics.rs
@@ -16,6 +16,9 @@ use std::time::{Duration, Instant};
use crate::metrics::{INDEX_CREATE_BYTES_TOTAL, INDEX_CREATE_ELAPSED, INDEX_CREATE_ROWS_TOTAL};
+pub(crate) type ByteCount = u64;
+pub(crate) type RowCount = usize;
+
/// Stage of the index creation process.
enum Stage {
Update,
@@ -33,9 +36,9 @@ pub(crate) struct Statistics {
/// Accumulated elapsed time for the cleanup stage.
cleanup_eplased: Duration,
/// Number of rows in the index.
- row_count: usize,
+ row_count: RowCount,
/// Number of bytes in the index.
- byte_count: u64,
+ byte_count: ByteCount,
}
impl Statistics {
@@ -58,12 +61,12 @@ impl Statistics {
}
/// Returns row count.
- pub fn row_count(&self) -> usize {
+ pub fn row_count(&self) -> RowCount {
self.row_count
}
/// Returns byte count.
- pub fn byte_count(&self) -> u64 {
+ pub fn byte_count(&self) -> ByteCount {
self.byte_count
}
}
diff --git a/src/mito2/src/sst/index/intermediate.rs b/src/mito2/src/sst/index/intermediate.rs
index cf48c9e6ebc8..18e63e827c73 100644
--- a/src/mito2/src/sst/index/intermediate.rs
+++ b/src/mito2/src/sst/index/intermediate.rs
@@ -33,8 +33,8 @@ pub struct IntermediateManager {
impl IntermediateManager {
/// Create a new `IntermediateManager` with the given root path.
/// It will clean up all garbage intermediate files from previous runs.
- pub async fn init_fs(root_path: impl AsRef<str>) -> Result<Self> {
- let store = new_fs_object_store(&normalize_dir(root_path.as_ref())).await?;
+ pub async fn init_fs(aux_path: impl AsRef<str>) -> Result<Self> {
+ let store = new_fs_object_store(&normalize_dir(aux_path.as_ref())).await?;
let store = InstrumentedStore::new(store);
// Remove all garbage intermediate files from previous runs.
diff --git a/src/mito2/src/sst/index/puffin_manager.rs b/src/mito2/src/sst/index/puffin_manager.rs
new file mode 100644
index 000000000000..85cfbfd6b72a
--- /dev/null
+++ b/src/mito2/src/sst/index/puffin_manager.rs
@@ -0,0 +1,207 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::path::Path;
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use common_error::ext::BoxedError;
+use object_store::{FuturesAsyncReader, FuturesAsyncWriter, ObjectStore};
+use puffin::error::{self as puffin_error, Result as PuffinResult};
+use puffin::puffin_manager::file_accessor::PuffinFileAccessor;
+use puffin::puffin_manager::fs_puffin_manager::FsPuffinManager;
+use puffin::puffin_manager::stager::{BoundedStager, FsBlobGuard, FsDirGuard};
+use puffin::puffin_manager::BlobGuard;
+use snafu::ResultExt;
+
+use crate::error::{PuffinInitStagerSnafu, Result};
+use crate::metrics::{
+ INDEX_PUFFIN_FLUSH_OP_TOTAL, INDEX_PUFFIN_READ_BYTES_TOTAL, INDEX_PUFFIN_READ_OP_TOTAL,
+ INDEX_PUFFIN_SEEK_OP_TOTAL, INDEX_PUFFIN_WRITE_BYTES_TOTAL, INDEX_PUFFIN_WRITE_OP_TOTAL,
+};
+use crate::sst::index::store::{self, InstrumentedStore};
+
+type InstrumentedAsyncRead = store::InstrumentedAsyncRead<'static, FuturesAsyncReader>;
+type InstrumentedAsyncWrite = store::InstrumentedAsyncWrite<'static, FuturesAsyncWriter>;
+
+pub(crate) type BlobReader = <Arc<FsBlobGuard> as BlobGuard>::Reader;
+pub(crate) type SstPuffinManager = FsPuffinManager<
+ Arc<FsBlobGuard>,
+ Arc<FsDirGuard>,
+ InstrumentedAsyncRead,
+ InstrumentedAsyncWrite,
+>;
+
+const STAGING_DIR: &str = "staging";
+
+/// A factory for creating `SstPuffinManager` instances.
+#[derive(Clone)]
+pub struct PuffinManagerFactory {
+ /// The stager used by the puffin manager.
+ stager: Arc<BoundedStager>,
+
+ /// The size of the write buffer used to create object store.
+ write_buffer_size: Option<usize>,
+}
+
+impl PuffinManagerFactory {
+ /// Creates a new `PuffinManagerFactory` instance.
+ pub async fn new(
+ aux_path: impl AsRef<Path>,
+ staging_capacity: u64,
+ write_buffer_size: Option<usize>,
+ ) -> Result<Self> {
+ let staging_dir = aux_path.as_ref().join(STAGING_DIR);
+ let stager = BoundedStager::new(staging_dir, staging_capacity)
+ .await
+ .context(PuffinInitStagerSnafu)?;
+ Ok(Self {
+ stager: Arc::new(stager),
+ write_buffer_size,
+ })
+ }
+
+ pub(crate) fn build(&self, store: ObjectStore) -> SstPuffinManager {
+ let store = InstrumentedStore::new(store).with_write_buffer_size(self.write_buffer_size);
+ let puffin_file_accessor = ObjectStorePuffinFileAccessor::new(store);
+ SstPuffinManager::new(self.stager.clone(), Arc::new(puffin_file_accessor))
+ }
+}
+
+impl PuffinManagerFactory {
+ #[cfg(test)]
+ pub(crate) async fn new_for_test_async(
+ prefix: &str,
+ ) -> (common_test_util::temp_dir::TempDir, Self) {
+ let tempdir = common_test_util::temp_dir::create_temp_dir(prefix);
+ let factory = Self::new(tempdir.path().to_path_buf(), 1024, None)
+ .await
+ .unwrap();
+ (tempdir, factory)
+ }
+
+ #[cfg(test)]
+ pub(crate) fn new_for_test_block(prefix: &str) -> (common_test_util::temp_dir::TempDir, Self) {
+ let tempdir = common_test_util::temp_dir::create_temp_dir(prefix);
+
+ let f = Self::new(tempdir.path().to_path_buf(), 1024, None);
+ let factory = common_runtime::block_on_bg(f).unwrap();
+
+ (tempdir, factory)
+ }
+}
+
+/// A `PuffinFileAccessor` implementation that uses an object store as the underlying storage.
+pub(crate) struct ObjectStorePuffinFileAccessor {
+ object_store: InstrumentedStore,
+}
+
+impl ObjectStorePuffinFileAccessor {
+ pub fn new(object_store: InstrumentedStore) -> Self {
+ Self { object_store }
+ }
+}
+
+#[async_trait]
+impl PuffinFileAccessor for ObjectStorePuffinFileAccessor {
+ type Reader = InstrumentedAsyncRead;
+ type Writer = InstrumentedAsyncWrite;
+
+ async fn reader(&self, puffin_file_name: &str) -> PuffinResult<Self::Reader> {
+ self.object_store
+ .reader(
+ puffin_file_name,
+ &INDEX_PUFFIN_READ_BYTES_TOTAL,
+ &INDEX_PUFFIN_READ_OP_TOTAL,
+ &INDEX_PUFFIN_SEEK_OP_TOTAL,
+ )
+ .await
+ .map_err(BoxedError::new)
+ .context(puffin_error::ExternalSnafu)
+ }
+
+ async fn writer(&self, puffin_file_name: &str) -> PuffinResult<Self::Writer> {
+ self.object_store
+ .writer(
+ puffin_file_name,
+ &INDEX_PUFFIN_WRITE_BYTES_TOTAL,
+ &INDEX_PUFFIN_WRITE_OP_TOTAL,
+ &INDEX_PUFFIN_FLUSH_OP_TOTAL,
+ )
+ .await
+ .map_err(BoxedError::new)
+ .context(puffin_error::ExternalSnafu)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use common_test_util::temp_dir::create_temp_dir;
+ use futures::io::Cursor;
+ use futures::AsyncReadExt;
+ use object_store::services::Memory;
+ use puffin::blob_metadata::CompressionCodec;
+ use puffin::puffin_manager::{
+ BlobGuard, DirGuard, PuffinManager, PuffinReader, PuffinWriter, PutOptions,
+ };
+
+ use super::*;
+
+ #[tokio::test]
+ async fn test_puffin_manager_factory() {
+ let (_dir, factory) =
+ PuffinManagerFactory::new_for_test_async("test_puffin_manager_factory_").await;
+
+ let object_store = ObjectStore::new(Memory::default()).unwrap().finish();
+ let manager = factory.build(object_store);
+
+ let file_name = "my-puffin-file";
+ let blob_key = "blob-key";
+ let dir_key = "dir-key";
+ let raw_data = b"hello world!";
+
+ let mut writer = manager.writer(file_name).await.unwrap();
+ writer
+ .put_blob(blob_key, Cursor::new(raw_data), PutOptions::default())
+ .await
+ .unwrap();
+ let dir_data = create_temp_dir("test_puffin_manager_factory_dir_data_");
+ tokio::fs::write(dir_data.path().join("hello"), raw_data)
+ .await
+ .unwrap();
+ writer
+ .put_dir(
+ dir_key,
+ dir_data.path().into(),
+ PutOptions {
+ compression: Some(CompressionCodec::Zstd),
+ },
+ )
+ .await
+ .unwrap();
+ writer.finish().await.unwrap();
+
+ let reader = manager.reader(file_name).await.unwrap();
+ let blob_guard = reader.blob(blob_key).await.unwrap();
+ let mut blob_reader = blob_guard.reader().await.unwrap();
+ let mut buf = Vec::new();
+ blob_reader.read_to_end(&mut buf).await.unwrap();
+ assert_eq!(buf, raw_data);
+
+ let dir_guard = reader.dir(dir_key).await.unwrap();
+ let file = dir_guard.path().join("hello");
+ let data = tokio::fs::read(file).await.unwrap();
+ assert_eq!(data, raw_data);
+ }
+}
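A rough usage sketch of the new factory (paths and sizes are illustrative; the signatures follow the code in this change): a caller builds the factory once from the index aux path and passes it alongside the intermediate manager when constructing an access layer.

    async fn build_access_layer(object_store: ObjectStore) -> Result<Arc<AccessLayer>> {
        let aux_path = "/data/greptimedb/index_intermediate";
        // Staging capacity and write buffer size would normally come from `IndexConfig`.
        let puffin_manager_factory =
            PuffinManagerFactory::new(aux_path, 2 * 1024 * 1024 * 1024, Some(8 * 1024 * 1024))
                .await?;
        let intermediate_manager = IntermediateManager::init_fs(aux_path).await?;
        Ok(Arc::new(AccessLayer::new(
            "region_dir",
            object_store,
            puffin_manager_factory,
            intermediate_manager,
        )))
    }
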
diff --git a/src/mito2/src/test_util/scheduler_util.rs b/src/mito2/src/test_util/scheduler_util.rs
index 590c66e08cf8..a6ffe0b2bf97 100644
--- a/src/mito2/src/test_util/scheduler_util.rs
+++ b/src/mito2/src/test_util/scheduler_util.rs
@@ -20,7 +20,6 @@ use common_base::Plugins;
use common_datasource::compression::CompressionType;
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use object_store::services::Fs;
-use object_store::util::join_dir;
use object_store::ObjectStore;
use store_api::metadata::RegionMetadataRef;
use tokio::sync::mpsc::Sender;
@@ -36,6 +35,7 @@ use crate::region::{ManifestContext, ManifestContextRef, RegionState};
use crate::request::WorkerRequest;
use crate::schedule::scheduler::{Job, LocalScheduler, Scheduler, SchedulerRef};
use crate::sst::index::intermediate::IntermediateManager;
+use crate::sst::index::puffin_manager::PuffinManagerFactory;
use crate::worker::WorkerListener;
/// Scheduler mocker.
@@ -55,11 +55,20 @@ impl SchedulerEnv {
let mut builder = Fs::default();
builder.root(&path_str);
- let intm_mgr = IntermediateManager::init_fs(join_dir(&path_str, "intm"))
+ let index_aux_path = path.path().join("index_aux");
+ let puffin_mgr = PuffinManagerFactory::new(&index_aux_path, 4096, None)
+ .await
+ .unwrap();
+ let intm_mgr = IntermediateManager::init_fs(index_aux_path.to_str().unwrap())
.await
.unwrap();
let object_store = ObjectStore::new(builder).unwrap().finish();
- let access_layer = Arc::new(AccessLayer::new("", object_store.clone(), intm_mgr));
+ let access_layer = Arc::new(AccessLayer::new(
+ "",
+ object_store.clone(),
+ puffin_mgr,
+ intm_mgr,
+ ));
SchedulerEnv {
path,
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index 2aa251fc10d3..2a9edf15f4a4 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -58,6 +58,7 @@ use crate::request::{
};
use crate::schedule::scheduler::{LocalScheduler, SchedulerRef};
use crate::sst::index::intermediate::IntermediateManager;
+use crate::sst::index::puffin_manager::PuffinManagerFactory;
use crate::time_provider::{StdTimeProvider, TimeProviderRef};
use crate::wal::Wal;
@@ -132,10 +133,15 @@ impl WorkerGroup {
let write_buffer_manager = Arc::new(WriteBufferManagerImpl::new(
config.global_write_buffer_size.as_bytes() as usize,
));
- let intermediate_manager =
- IntermediateManager::init_fs(&config.inverted_index.intermediate_path)
- .await?
- .with_buffer_size(Some(config.inverted_index.write_buffer_size.as_bytes() as _));
+ let puffin_manager_factory = PuffinManagerFactory::new(
+ &config.index.aux_path,
+ config.index.staging_size.as_bytes(),
+ Some(config.index.write_buffer_size.as_bytes() as _),
+ )
+ .await?;
+ let intermediate_manager = IntermediateManager::init_fs(&config.index.aux_path)
+ .await?
+ .with_buffer_size(Some(config.index.write_buffer_size.as_bytes() as _));
let scheduler = Arc::new(LocalScheduler::new(config.max_background_jobs));
// We use another scheduler to avoid purge jobs blocking other jobs.
// A purge job is cheaper than other background jobs so they share the same job limit.
@@ -169,6 +175,7 @@ impl WorkerGroup {
purge_scheduler: purge_scheduler.clone(),
listener: WorkerListener::default(),
cache_manager: cache_manager.clone(),
+ puffin_manager_factory: puffin_manager_factory.clone(),
intermediate_manager: intermediate_manager.clone(),
time_provider: time_provider.clone(),
flush_sender: flush_sender.clone(),
@@ -261,10 +268,15 @@ impl WorkerGroup {
});
let scheduler = Arc::new(LocalScheduler::new(config.max_background_jobs));
let purge_scheduler = Arc::new(LocalScheduler::new(config.max_background_jobs));
- let intermediate_manager =
- IntermediateManager::init_fs(&config.inverted_index.intermediate_path)
- .await?
- .with_buffer_size(Some(config.inverted_index.write_buffer_size.as_bytes() as _));
+ let puffin_manager_factory = PuffinManagerFactory::new(
+ &config.index.aux_path,
+ config.index.staging_size.as_bytes(),
+ Some(config.index.write_buffer_size.as_bytes() as _),
+ )
+ .await?;
+ let intermediate_manager = IntermediateManager::init_fs(&config.index.aux_path)
+ .await?
+ .with_buffer_size(Some(config.index.write_buffer_size.as_bytes() as _));
let write_cache = write_cache_from_config(
&config,
object_store_manager.clone(),
@@ -292,6 +304,7 @@ impl WorkerGroup {
purge_scheduler: purge_scheduler.clone(),
listener: WorkerListener::new(listener.clone()),
cache_manager: cache_manager.clone(),
+ puffin_manager_factory: puffin_manager_factory.clone(),
intermediate_manager: intermediate_manager.clone(),
time_provider: time_provider.clone(),
flush_sender: flush_sender.clone(),
@@ -361,6 +374,7 @@ struct WorkerStarter<S> {
purge_scheduler: SchedulerRef,
listener: WorkerListener,
cache_manager: CacheManagerRef,
+ puffin_manager_factory: PuffinManagerFactory,
intermediate_manager: IntermediateManager,
time_provider: TimeProviderRef,
/// Watch channel sender to notify workers to handle stalled requests.
@@ -408,6 +422,7 @@ impl<S: LogStore> WorkerStarter<S> {
stalled_requests: StalledRequests::default(),
listener: self.listener,
cache_manager: self.cache_manager,
+ puffin_manager_factory: self.puffin_manager_factory,
intermediate_manager: self.intermediate_manager,
time_provider: self.time_provider,
last_periodical_check_millis: now,
@@ -586,6 +601,8 @@ struct RegionWorkerLoop<S> {
listener: WorkerListener,
/// Cache.
cache_manager: CacheManagerRef,
+ /// Puffin manager factory for index.
+ puffin_manager_factory: PuffinManagerFactory,
/// Intermediate manager for inverted index.
intermediate_manager: IntermediateManager,
/// Provider to get current time.
diff --git a/src/mito2/src/worker/handle_catchup.rs b/src/mito2/src/worker/handle_catchup.rs
index a4353fe52952..e01680ab17b6 100644
--- a/src/mito2/src/worker/handle_catchup.rs
+++ b/src/mito2/src/worker/handle_catchup.rs
@@ -54,6 +54,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
self.memtable_builder_provider.clone(),
self.object_store_manager.clone(),
self.purge_scheduler.clone(),
+ self.puffin_manager_factory.clone(),
self.intermediate_manager.clone(),
)
.cache(Some(self.cache_manager.clone()))
diff --git a/src/mito2/src/worker/handle_create.rs b/src/mito2/src/worker/handle_create.rs
index f07a1f38a183..e99c0a810237 100644
--- a/src/mito2/src/worker/handle_create.rs
+++ b/src/mito2/src/worker/handle_create.rs
@@ -61,6 +61,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
self.memtable_builder_provider.clone(),
self.object_store_manager.clone(),
self.purge_scheduler.clone(),
+ self.puffin_manager_factory.clone(),
self.intermediate_manager.clone(),
)
.metadata(metadata)
diff --git a/src/mito2/src/worker/handle_open.rs b/src/mito2/src/worker/handle_open.rs
index 840e19583c49..d87f531a7220 100644
--- a/src/mito2/src/worker/handle_open.rs
+++ b/src/mito2/src/worker/handle_open.rs
@@ -93,6 +93,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
self.memtable_builder_provider.clone(),
self.object_store_manager.clone(),
self.purge_scheduler.clone(),
+ self.puffin_manager_factory.clone(),
self.intermediate_manager.clone(),
)
.skip_wal_replay(request.skip_wal_replay)
diff --git a/src/puffin/src/error.rs b/src/puffin/src/error.rs
index 8a28dffdcb54..b30c542f4ea8 100644
--- a/src/puffin/src/error.rs
+++ b/src/puffin/src/error.rs
@@ -16,7 +16,7 @@ use std::any::Any;
use std::io::Error as IoError;
use std::sync::Arc;
-use common_error::ext::ErrorExt;
+use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
@@ -248,6 +248,14 @@ pub enum Error {
#[snafu(display("Get value from cache"))]
CacheGet { source: Arc<Error> },
+
+ #[snafu(display("External error"))]
+ External {
+ #[snafu(source)]
+ error: BoxedError,
+ #[snafu(implicit)]
+ location: Location,
+ },
}
impl ErrorExt for Error {
@@ -287,6 +295,8 @@ impl ErrorExt for Error {
DuplicateBlob { .. } => StatusCode::InvalidArguments,
CacheGet { source } => source.status_code(),
+
+ External { error, .. } => error.status_code(),
}
}
diff --git a/src/puffin/src/puffin_manager/stager.rs b/src/puffin/src/puffin_manager/stager.rs
index c390e9910a61..47d2eb8eb04c 100644
--- a/src/puffin/src/puffin_manager/stager.rs
+++ b/src/puffin/src/puffin_manager/stager.rs
@@ -18,7 +18,7 @@ use std::path::PathBuf;
use std::sync::Arc;
use async_trait::async_trait;
-pub use bounded_stager::BoundedStager;
+pub use bounded_stager::{BoundedStager, FsBlobGuard, FsDirGuard};
use futures::future::BoxFuture;
use futures::AsyncWrite;
diff --git a/src/puffin/src/puffin_manager/stager/bounded_stager.rs b/src/puffin/src/puffin_manager/stager/bounded_stager.rs
index 2b732450ca16..e63d4f5524d4 100644
--- a/src/puffin/src/puffin_manager/stager/bounded_stager.rs
+++ b/src/puffin/src/puffin_manager/stager/bounded_stager.rs
@@ -68,6 +68,10 @@ pub struct BoundedStager {
impl BoundedStager {
pub async fn new(base_dir: PathBuf, capacity: u64) -> Result<Self> {
+ tokio::fs::create_dir_all(&base_dir)
+ .await
+ .context(CreateSnafu)?;
+
let recycle_bin = Cache::builder()
.time_to_live(Duration::from_secs(60))
.build();
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 1771377ee57b..c9c846807804 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -826,13 +826,16 @@ sst_write_buffer_size = "8MiB"
parallel_scan_channel_size = 32
allow_stale_entries = false
+[region_engine.mito.index]
+aux_path = ""
+staging_size = "2GiB"
+write_buffer_size = "8MiB"
+
[region_engine.mito.inverted_index]
create_on_flush = "auto"
create_on_compaction = "auto"
apply_on_query = "auto"
-write_buffer_size = "8MiB"
mem_threshold_on_create = "64.0MiB"
-intermediate_path = ""
[region_engine.mito.memtable]
type = "time_series"
type: feat
masked_commit_message: integrate puffin manager with inverted index applier (#4266)

hash: e476e36647aa390d8974356fbd2d36d6a82e57e4
date: 2024-09-05 10:12:29
author: Ning Sun
commit_message: feat: add geohash and h3 as built-in functions (#4656)
is_merge: false
git_diff: "diff --git a/Cargo.lock b/Cargo.lock\nindex 47b22a44d7d3..4d1aff23c623 100644\n--- a/Cargo.lock\n++(...TRUNCATED)
type: feat
masked_commit_message: add geohash and h3 as built-in functions (#4656)