hash (string, 40 chars) | date (2022-04-19 15:26:27 to 2025-03-21 10:49:23) | author (86 classes) | commit_message (string, 12 to 115 chars) | is_merge (bool, 1 class) | git_diff (string, 214 to 553k chars, nullable) | type (15 classes) | masked_commit_message (string, 8 to 110 chars)
|---|---|---|---|---|---|---|---|
2b72e6653663d889d30421ebeaca5328202a6b9b
|
2024-11-18 14:07:06
|
Yohan Wal
|
test: subquery test migrated from duckdb (#4985)
| false
|
diff --git a/tests/cases/standalone/common/subquery/table.result b/tests/cases/standalone/common/subquery/table.result
new file mode 100644
index 000000000000..8cea3aed13e2
--- /dev/null
+++ b/tests/cases/standalone/common/subquery/table.result
@@ -0,0 +1,184 @@
+-- aliasing, from:
+-- https://github.com/duckdb/duckdb/blob/9196dd9b0a163e6c8aada26218803d04be30c562/test/sql/subquery/table/test_aliasing.test
+CREATE TABLE a(ts TIMESTAMP TIME INDEX, i INTEGER);
+
+Affected Rows: 0
+
+insert into a values (1, 42);
+
+Affected Rows: 1
+
+SELECT * FROM (SELECT i AS j FROM a GROUP BY j) WHERE j = 42;
+
++----+
+| j |
++----+
+| 42 |
++----+
+
+SELECT * FROM (SELECT i AS j FROM a GROUP BY i) WHERE j = 42;
+
++----+
+| j |
++----+
+| 42 |
++----+
+
+DROP TABLE a;
+
+Affected Rows: 0
+
+-- nested table subquery, from:
+-- https://github.com/duckdb/duckdb/blob/2e4e2913266ddc46c7281d1b992228cb0095954b/test/sql/subquery/table/test_nested_table_subquery.test_slow
+CREATE TABLE test (ts TIMESTAMP TIME INDEX, i INTEGER, j INTEGER);
+
+Affected Rows: 0
+
+INSERT INTO test VALUES (0, 3, 4), (1, 4, 5), (2, 5, 6);
+
+Affected Rows: 3
+
+SELECT * FROM (SELECT i, j FROM (SELECT j AS i, i AS j FROM (SELECT j AS i, i AS j FROM test) AS a) AS a) AS a, (SELECT i+1 AS r,j FROM test) AS b, test WHERE a.i=b.r AND test.j=a.i ORDER BY 1;
+
++---+---+---+---+-------------------------+---+---+
+| i | j | r | j | ts | i | j |
++---+---+---+---+-------------------------+---+---+
+| 4 | 5 | 4 | 4 | 1970-01-01T00:00:00 | 3 | 4 |
+| 5 | 6 | 5 | 5 | 1970-01-01T00:00:00.001 | 4 | 5 |
++---+---+---+---+-------------------------+---+---+
+
+SELECT i FROM (SELECT i + 1 AS i FROM (SELECT i + 1 AS i FROM (SELECT i + 1 AS i FROM test)));
+
++---+
+| i |
++---+
+| 6 |
+| 7 |
+| 8 |
++---+
+
+DROP TABLE test;
+
+Affected Rows: 0
+
+-- subquery union, from:
+-- https://github.com/duckdb/duckdb/blob/9196dd9b0a163e6c8aada26218803d04be30c562/test/sql/subquery/table/test_subquery_union.test
+SELECT * FROM (SELECT 42) UNION ALL SELECT * FROM (SELECT 43);
+
++-----------+
+| Int64(42) |
++-----------+
+| 42 |
+| 43 |
++-----------+
+
+-- table subquery, from:
+-- https://github.com/duckdb/duckdb/blob/8704c7d0807d6ce1e2ebcdf6398e1b6cc050e507/test/sql/subquery/table/test_table_subquery.test
+CREATE TABLE test (ts TIMESTAMP TIME INDEX, i INTEGER, j INTEGER);
+
+Affected Rows: 0
+
+INSERT INTO test VALUES (0, 3, 4), (1, 4, 5), (2, 5, 6);
+
+Affected Rows: 3
+
+SELECT * FROM (SELECT i, j AS d FROM test ORDER BY i) AS b;
+
++---+---+
+| i | d |
++---+---+
+| 3 | 4 |
+| 4 | 5 |
+| 5 | 6 |
++---+---+
+
+SELECT b.d FROM (SELECT i * 2 + j AS d FROM test) AS b;
+
++----+
+| d |
++----+
+| 10 |
+| 13 |
+| 16 |
++----+
+
+SELECT a.i,a.j,b.r,b.j FROM (SELECT i, j FROM test) AS a INNER JOIN (SELECT i+1 AS r,j FROM test) AS b ON a.i=b.r ORDER BY 1;
+
++---+---+---+---+
+| i | j | r | j |
++---+---+---+---+
+| 4 | 5 | 4 | 4 |
+| 5 | 6 | 5 | 5 |
++---+---+---+---+
+
+SELECT * FROM (SELECT i, j FROM test) AS a, (SELECT i+1 AS r,j FROM test) AS b, test WHERE a.i=b.r AND test.j=a.i ORDER BY 1;
+
++---+---+---+---+-------------------------+---+---+
+| i | j | r | j | ts | i | j |
++---+---+---+---+-------------------------+---+---+
+| 4 | 5 | 4 | 4 | 1970-01-01T00:00:00 | 3 | 4 |
+| 5 | 6 | 5 | 5 | 1970-01-01T00:00:00.001 | 4 | 5 |
++---+---+---+---+-------------------------+---+---+
+
+SELECT sum(x) FROM (SELECT i AS x FROM test GROUP BY i) sq;
+
++-----------+
+| SUM(sq.x) |
++-----------+
+| 12 |
++-----------+
+
+SELECT sum(x) FROM (SELECT i+1 AS x FROM test GROUP BY x) sq;
+
++-----------+
+| SUM(sq.x) |
++-----------+
+| 15 |
++-----------+
+
+DROP TABLE test;
+
+Affected Rows: 0
+
+-- test unnamed subquery, from:
+-- https://github.com/duckdb/duckdb/blob/00a605270719941ca0412ad5d0a14b1bdfbf9eb5/test/sql/subquery/table/test_unnamed_subquery.test
+SELECT a FROM (SELECT 42 a);
+
++----+
+| a |
++----+
+| 42 |
++----+
+
+SELECT * FROM (SELECT 42 a), (SELECT 43 b);
+
++----+----+
+| a | b |
++----+----+
+| 42 | 43 |
++----+----+
+
+SELECT * FROM (VALUES (42, 43));
+
++---------+---------+
+| column1 | column2 |
++---------+---------+
+| 42 | 43 |
++---------+---------+
+
+SELECT * FROM (SELECT 42 a), (SELECT 43 b), (SELECT 44 c), (SELECT 45 d);
+
++----+----+----+----+
+| a | b | c | d |
++----+----+----+----+
+| 42 | 43 | 44 | 45 |
++----+----+----+----+
+
+SELECT * FROM (SELECT * FROM (SELECT 42 a), (SELECT 43 b)) JOIN (SELECT 44 c) ON (true) JOIN (SELECT 45 d) ON (true);
+
++----+----+----+----+
+| a | b | c | d |
++----+----+----+----+
+| 42 | 43 | 44 | 45 |
++----+----+----+----+
+
diff --git a/tests/cases/standalone/common/subquery/table.sql b/tests/cases/standalone/common/subquery/table.sql
new file mode 100644
index 000000000000..9f53aef301f4
--- /dev/null
+++ b/tests/cases/standalone/common/subquery/table.sql
@@ -0,0 +1,64 @@
+-- aliasing, from:
+-- https://github.com/duckdb/duckdb/blob/9196dd9b0a163e6c8aada26218803d04be30c562/test/sql/subquery/table/test_aliasing.test
+CREATE TABLE a(ts TIMESTAMP TIME INDEX, i INTEGER);
+
+insert into a values (1, 42);
+
+SELECT * FROM (SELECT i AS j FROM a GROUP BY j) WHERE j = 42;
+
+SELECT * FROM (SELECT i AS j FROM a GROUP BY i) WHERE j = 42;
+
+DROP TABLE a;
+
+-- nested table subquery, from:
+-- https://github.com/duckdb/duckdb/blob/2e4e2913266ddc46c7281d1b992228cb0095954b/test/sql/subquery/table/test_nested_table_subquery.test_slow
+CREATE TABLE test (ts TIMESTAMP TIME INDEX, i INTEGER, j INTEGER);
+
+INSERT INTO test VALUES (0, 3, 4), (1, 4, 5), (2, 5, 6);
+
+SELECT * FROM (SELECT i, j FROM (SELECT j AS i, i AS j FROM (SELECT j AS i, i AS j FROM test) AS a) AS a) AS a, (SELECT i+1 AS r,j FROM test) AS b, test WHERE a.i=b.r AND test.j=a.i ORDER BY 1;
+
+SELECT i FROM (SELECT i + 1 AS i FROM (SELECT i + 1 AS i FROM (SELECT i + 1 AS i FROM test)));
+
+DROP TABLE test;
+
+-- subquery union, from:
+-- https://github.com/duckdb/duckdb/blob/9196dd9b0a163e6c8aada26218803d04be30c562/test/sql/subquery/table/test_subquery_union.test
+SELECT * FROM (SELECT 42) UNION ALL SELECT * FROM (SELECT 43);
+
+-- table subquery, from:
+-- https://github.com/duckdb/duckdb/blob/8704c7d0807d6ce1e2ebcdf6398e1b6cc050e507/test/sql/subquery/table/test_table_subquery.test
+CREATE TABLE test (ts TIMESTAMP TIME INDEX, i INTEGER, j INTEGER);
+
+INSERT INTO test VALUES (0, 3, 4), (1, 4, 5), (2, 5, 6);
+
+SELECT * FROM (SELECT i, j AS d FROM test ORDER BY i) AS b;
+
+SELECT b.d FROM (SELECT i * 2 + j AS d FROM test) AS b;
+
+SELECT a.i,a.j,b.r,b.j FROM (SELECT i, j FROM test) AS a INNER JOIN (SELECT i+1 AS r,j FROM test) AS b ON a.i=b.r ORDER BY 1;
+
+SELECT * FROM (SELECT i, j FROM test) AS a, (SELECT i+1 AS r,j FROM test) AS b, test WHERE a.i=b.r AND test.j=a.i ORDER BY 1;
+
+SELECT sum(x) FROM (SELECT i AS x FROM test GROUP BY i) sq;
+
+SELECT sum(x) FROM (SELECT i+1 AS x FROM test GROUP BY x) sq;
+
+DROP TABLE test;
+
+-- test unnamed subquery, from:
+-- https://github.com/duckdb/duckdb/blob/00a605270719941ca0412ad5d0a14b1bdfbf9eb5/test/sql/subquery/table/test_unnamed_subquery.test
+SELECT a FROM (SELECT 42 a);
+
+SELECT * FROM (SELECT 42 a), (SELECT 43 b);
+
+SELECT * FROM (VALUES (42, 43));
+
+SELECT * FROM (SELECT 42 a), (SELECT 43 b), (SELECT 44 c), (SELECT 45 d);
+
+SELECT * FROM (SELECT * FROM (SELECT 42 a), (SELECT 43 b)) JOIN (SELECT 44 c) ON (true) JOIN (SELECT 45 d) ON (true);
+
+-- skipped, unsupported feature: unnamed_subquery, see also:
+-- https://github.com/GreptimeTeam/greptimedb/issues/5012
+-- SELECT * FROM (SELECT unnamed_subquery.a FROM (SELECT 42 a)), (SELECT unnamed_subquery.b FROM (SELECT 43 b));
+-- SELECT unnamed_subquery.a, unnamed_subquery2.b FROM (SELECT 42 a), (SELECT 43 b);
diff --git a/tests/cases/standalone/common/subquery/test_neumann.result b/tests/cases/standalone/common/subquery/test_neumann.result
new file mode 100644
index 000000000000..6b4216795302
--- /dev/null
+++ b/tests/cases/standalone/common/subquery/test_neumann.result
@@ -0,0 +1,58 @@
+-- from:
+-- https://github.com/duckdb/duckdb/blob/74687ec572e9e6ccf34f9b15daa62998b34a3e13/test/sql/subquery/test_neumann.test
+CREATE TABLE students(ts TIMESTAMP TIME INDEX, id INTEGER, n VARCHAR, major VARCHAR, y INTEGER);
+
+Affected Rows: 0
+
+CREATE TABLE exams(ts TIMESTAMP TIME INDEX, sid INTEGER, course VARCHAR, curriculum VARCHAR, grade INTEGER, y INTEGER);
+
+Affected Rows: 0
+
+INSERT INTO students VALUES (1, 1, 'Mark', 'CS', 2017);
+
+Affected Rows: 1
+
+INSERT INTO students VALUES (2, 2, 'Dirk', 'CS', 2017);
+
+Affected Rows: 1
+
+INSERT INTO exams VALUES (1, 1, 'Database Systems', 'CS', 10, 2015);
+
+Affected Rows: 1
+
+INSERT INTO exams VALUES (2, 1, 'Graphics', 'CS', 9, 2016);
+
+Affected Rows: 1
+
+INSERT INTO exams VALUES (3, 2, 'Database Systems', 'CS', 7, 2015);
+
+Affected Rows: 1
+
+INSERT INTO exams VALUES (4, 2, 'Graphics', 'CS', 7, 2016);
+
+Affected Rows: 1
+
+SELECT s.n, e.course, e.grade FROM students s, exams e WHERE s.id=e.sid AND e.grade=(SELECT MAX(e2.grade) FROM exams e2 WHERE s.id=e2.sid) ORDER BY n, course;
+
++------+------------------+-------+
+| n | course | grade |
++------+------------------+-------+
+| Dirk | Database Systems | 7 |
+| Dirk | Graphics | 7 |
+| Mark | Database Systems | 10 |
++------+------------------+-------+
+
+-- skipped, unsupported feature: correlated column in predicate, see also:
+-- https://github.com/GreptimeTeam/greptimedb/issues/5012
+-- SELECT s.n, e.course, e.grade FROM students s, exams e WHERE s.id=e.sid AND (s.major = 'CS' OR s.major = 'Games Eng') AND e.grade <= (SELECT AVG(e2.grade) - 1 FROM exams e2 WHERE s.id=e2.sid OR (e2.curriculum=s.major AND s.y>=e2.y)) ORDER BY n, course;
+-- skipped, unsupported feature: exists, see also:
+-- https://github.com/GreptimeTeam/greptimedb/issues/5012
+-- SELECT n, major FROM students s WHERE EXISTS(SELECT * FROM exams e WHERE e.sid=s.id AND grade=10) OR s.n='Dirk' ORDER BY n;
+DROP TABLE students;
+
+Affected Rows: 0
+
+DROP TABLE exams;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/subquery/test_neumann.sql b/tests/cases/standalone/common/subquery/test_neumann.sql
new file mode 100644
index 000000000000..3b29b5be226d
--- /dev/null
+++ b/tests/cases/standalone/common/subquery/test_neumann.sql
@@ -0,0 +1,31 @@
+-- from:
+-- https://github.com/duckdb/duckdb/blob/74687ec572e9e6ccf34f9b15daa62998b34a3e13/test/sql/subquery/test_neumann.test
+CREATE TABLE students(ts TIMESTAMP TIME INDEX, id INTEGER, n VARCHAR, major VARCHAR, y INTEGER);
+
+CREATE TABLE exams(ts TIMESTAMP TIME INDEX, sid INTEGER, course VARCHAR, curriculum VARCHAR, grade INTEGER, y INTEGER);
+
+INSERT INTO students VALUES (1, 1, 'Mark', 'CS', 2017);
+
+INSERT INTO students VALUES (2, 2, 'Dirk', 'CS', 2017);
+
+INSERT INTO exams VALUES (1, 1, 'Database Systems', 'CS', 10, 2015);
+
+INSERT INTO exams VALUES (2, 1, 'Graphics', 'CS', 9, 2016);
+
+INSERT INTO exams VALUES (3, 2, 'Database Systems', 'CS', 7, 2015);
+
+INSERT INTO exams VALUES (4, 2, 'Graphics', 'CS', 7, 2016);
+
+SELECT s.n, e.course, e.grade FROM students s, exams e WHERE s.id=e.sid AND e.grade=(SELECT MAX(e2.grade) FROM exams e2 WHERE s.id=e2.sid) ORDER BY n, course;
+
+-- skipped, unsupported feature: correlated column in predicate, see also:
+-- https://github.com/GreptimeTeam/greptimedb/issues/5012
+-- SELECT s.n, e.course, e.grade FROM students s, exams e WHERE s.id=e.sid AND (s.major = 'CS' OR s.major = 'Games Eng') AND e.grade <= (SELECT AVG(e2.grade) - 1 FROM exams e2 WHERE s.id=e2.sid OR (e2.curriculum=s.major AND s.y>=e2.y)) ORDER BY n, course;
+
+-- skipped, unsupported feature: exists, see also:
+-- https://github.com/GreptimeTeam/greptimedb/issues/5012
+-- SELECT n, major FROM students s WHERE EXISTS(SELECT * FROM exams e WHERE e.sid=s.id AND grade=10) OR s.n='Dirk' ORDER BY n;
+
+DROP TABLE students;
+
+DROP TABLE exams;
|
test
|
subquery test migrated from duckdb (#4985)
|
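The commit above adds sqlness-style test cases: each `.sql` file is replayed against the database and the rendered output must match the checked-in `.result` file byte for byte. Below is a minimal, self-contained sketch of that pattern; the `check_case` function, the `run_statement` closure, and the file layout are illustrative stand-ins, not the real sqlness harness or its API.

```rust
use std::fs;
use std::path::Path;

/// Sketch of a result-file test: replay `<name>.sql`, render each statement's
/// output, and compare the transcript with `<name>.result`.
/// `run_statement` is a placeholder for whatever client the real harness uses.
fn check_case<F>(dir: &Path, name: &str, mut run_statement: F) -> Result<(), String>
where
    F: FnMut(&str) -> String,
{
    let sql = fs::read_to_string(dir.join(format!("{name}.sql"))).map_err(|e| e.to_string())?;
    let expected =
        fs::read_to_string(dir.join(format!("{name}.result"))).map_err(|e| e.to_string())?;

    let mut actual = String::new();
    // Naive statement split; the real harness is comment- and quote-aware.
    for stmt in sql.split(';').map(str::trim).filter(|s| !s.is_empty()) {
        actual.push_str(stmt);
        actual.push_str(";\n\n");
        actual.push_str(&run_statement(stmt));
        actual.push_str("\n\n");
    }

    if actual == expected {
        Ok(())
    } else {
        Err(format!("output mismatch for case `{name}`"))
    }
}

fn main() {
    // Dummy engine that answers every statement the same way, just to exercise the flow.
    let outcome = check_case(Path::new("tests/cases"), "table", |_stmt| {
        "Affected Rows: 0".to_string()
    });
    println!("{outcome:?}");
}
```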
ec6335336428abec331bd624f329024f25adfbfe
|
2022-05-07 12:57:00
|
evenyag
|
refactor: Move planner and adapters to datafusion mod
| false
|
diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs
index 29dd339b97be..700a33534ee9 100644
--- a/src/query/src/datafusion.rs
+++ b/src/query/src/datafusion.rs
@@ -1,5 +1,9 @@
-mod adapter;
-pub mod error;
+//! Planner, QueryEngine implementations based on DataFusion.
+
+mod catalog_adapter;
+mod error;
+mod plan_adapter;
+mod planner;
use std::sync::Arc;
@@ -7,17 +11,19 @@ use common_recordbatch::{EmptyRecordBatchStream, SendableRecordBatchStream};
use snafu::{OptionExt, ResultExt};
use sql::{dialect::GenericDialect, parser::ParserContext};
+pub use crate::datafusion::catalog_adapter::DfCatalogListAdapter;
use crate::query_engine::{QueryContext, QueryEngineState};
use crate::{
catalog::CatalogListRef,
- datafusion::adapter::PhysicalPlanAdapter,
+ datafusion::plan_adapter::PhysicalPlanAdapter,
+ datafusion::planner::{DfContextProviderAdapter, DfPlanner},
error::Result,
executor::QueryExecutor,
logical_optimizer::LogicalOptimizer,
physical_optimizer::PhysicalOptimizer,
physical_planner::PhysicalPlanner,
plan::{LogicalPlan, PhysicalPlan},
- planner::{DfContextProviderAdapter, DfPlanner, Planner},
+ planner::Planner,
Output, QueryEngine,
};
diff --git a/src/query/src/datafusion/catalog_adapter.rs b/src/query/src/datafusion/catalog_adapter.rs
new file mode 100644
index 000000000000..e357b28edb2e
--- /dev/null
+++ b/src/query/src/datafusion/catalog_adapter.rs
@@ -0,0 +1,222 @@
+//! Catalog adapter between datafusion and greptime query engine.
+
+use std::any::Any;
+use std::sync::Arc;
+
+use datafusion::catalog::{
+ catalog::{CatalogList as DfCatalogList, CatalogProvider as DfCatalogProvider},
+ schema::SchemaProvider as DfSchemaProvider,
+};
+use datafusion::datasource::TableProvider as DfTableProvider;
+use datafusion::error::Result as DataFusionResult;
+use datafusion::execution::runtime_env::RuntimeEnv;
+use snafu::ResultExt;
+use table::{
+ table::adapter::{DfTableProviderAdapter, TableAdapter},
+ Table,
+};
+
+use crate::catalog::{schema::SchemaProvider, CatalogListRef, CatalogProvider};
+use crate::datafusion::error;
+use crate::error::Result;
+
+pub struct DfCatalogListAdapter {
+ runtime: Arc<RuntimeEnv>,
+ catalog_list: CatalogListRef,
+}
+
+impl DfCatalogListAdapter {
+ pub fn new(runtime: Arc<RuntimeEnv>, catalog_list: CatalogListRef) -> DfCatalogListAdapter {
+ DfCatalogListAdapter {
+ runtime,
+ catalog_list,
+ }
+ }
+}
+
+impl DfCatalogList for DfCatalogListAdapter {
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn register_catalog(
+ &self,
+ name: String,
+ catalog: Arc<dyn DfCatalogProvider>,
+ ) -> Option<Arc<dyn DfCatalogProvider>> {
+ let catalog_adapter = Arc::new(CatalogProviderAdapter {
+ df_cataglog_provider: catalog,
+ runtime: self.runtime.clone(),
+ });
+ self.catalog_list
+ .register_catalog(name, catalog_adapter)
+ .map(|catalog_provider| {
+ Arc::new(DfCatalogProviderAdapter {
+ catalog_provider,
+ runtime: self.runtime.clone(),
+ }) as _
+ })
+ }
+
+ fn catalog_names(&self) -> Vec<String> {
+ self.catalog_list.catalog_names()
+ }
+
+ fn catalog(&self, name: &str) -> Option<Arc<dyn DfCatalogProvider>> {
+ self.catalog_list.catalog(name).map(|catalog_provider| {
+ Arc::new(DfCatalogProviderAdapter {
+ catalog_provider,
+ runtime: self.runtime.clone(),
+ }) as _
+ })
+ }
+}
+
+/// Datafusion's CatalogProvider -> greptime CatalogProvider
+struct CatalogProviderAdapter {
+ df_cataglog_provider: Arc<dyn DfCatalogProvider>,
+ runtime: Arc<RuntimeEnv>,
+}
+
+impl CatalogProvider for CatalogProviderAdapter {
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn schema_names(&self) -> Vec<String> {
+ self.df_cataglog_provider.schema_names()
+ }
+
+ fn schema(&self, name: &str) -> Option<Arc<dyn SchemaProvider>> {
+ self.df_cataglog_provider
+ .schema(name)
+ .map(|df_schema_provider| {
+ Arc::new(SchemaProviderAdapter {
+ df_schema_provider,
+ runtime: self.runtime.clone(),
+ }) as _
+ })
+ }
+}
+
+/// Greptime CatalogProvider -> datafusion's CatalogProvider
+struct DfCatalogProviderAdapter {
+ catalog_provider: Arc<dyn CatalogProvider>,
+ runtime: Arc<RuntimeEnv>,
+}
+
+impl DfCatalogProvider for DfCatalogProviderAdapter {
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn schema_names(&self) -> Vec<String> {
+ self.catalog_provider.schema_names()
+ }
+
+ fn schema(&self, name: &str) -> Option<Arc<dyn DfSchemaProvider>> {
+ self.catalog_provider.schema(name).map(|schema_provider| {
+ Arc::new(DfSchemaProviderAdapter {
+ schema_provider,
+ runtime: self.runtime.clone(),
+ }) as _
+ })
+ }
+}
+
+/// Greptime SchemaProvider -> datafusion SchemaProvider
+struct DfSchemaProviderAdapter {
+ schema_provider: Arc<dyn SchemaProvider>,
+ runtime: Arc<RuntimeEnv>,
+}
+
+impl DfSchemaProvider for DfSchemaProviderAdapter {
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn table_names(&self) -> Vec<String> {
+ self.schema_provider.table_names()
+ }
+
+ fn table(&self, name: &str) -> Option<Arc<dyn DfTableProvider>> {
+ self.schema_provider
+ .table(name)
+ .map(|table| Arc::new(DfTableProviderAdapter::new(table)) as _)
+ }
+
+ fn register_table(
+ &self,
+ name: String,
+ table: Arc<dyn DfTableProvider>,
+ ) -> DataFusionResult<Option<Arc<dyn DfTableProvider>>> {
+ let table = Arc::new(TableAdapter::new(table, self.runtime.clone()));
+ match self.schema_provider.register_table(name, table)? {
+ Some(p) => Ok(Some(Arc::new(DfTableProviderAdapter::new(p)))),
+ None => Ok(None),
+ }
+ }
+
+ fn deregister_table(&self, name: &str) -> DataFusionResult<Option<Arc<dyn DfTableProvider>>> {
+ match self.schema_provider.deregister_table(name)? {
+ Some(p) => Ok(Some(Arc::new(DfTableProviderAdapter::new(p)))),
+ None => Ok(None),
+ }
+ }
+
+ fn table_exist(&self, name: &str) -> bool {
+ self.schema_provider.table_exist(name)
+ }
+}
+
+/// Datafusion SchemaProviderAdapter -> greptime SchemaProviderAdapter
+struct SchemaProviderAdapter {
+ df_schema_provider: Arc<dyn DfSchemaProvider>,
+ runtime: Arc<RuntimeEnv>,
+}
+
+impl SchemaProvider for SchemaProviderAdapter {
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ /// Retrieves the list of available table names in this schema.
+ fn table_names(&self) -> Vec<String> {
+ self.df_schema_provider.table_names()
+ }
+
+ fn table(&self, name: &str) -> Option<Arc<dyn Table>> {
+ self.df_schema_provider.table(name).map(|table_provider| {
+ Arc::new(TableAdapter::new(table_provider, self.runtime.clone())) as _
+ })
+ }
+
+ fn register_table(
+ &self,
+ name: String,
+ table: Arc<dyn Table>,
+ ) -> Result<Option<Arc<dyn Table>>> {
+ let table_provider = Arc::new(DfTableProviderAdapter::new(table));
+ Ok(self
+ .df_schema_provider
+ .register_table(name, table_provider)
+ .context(error::DatafusionSnafu {
+ msg: "Fail to register table to datafusion",
+ })?
+ .map(|table| (Arc::new(TableAdapter::new(table, self.runtime.clone())) as _)))
+ }
+
+ fn deregister_table(&self, name: &str) -> Result<Option<Arc<dyn Table>>> {
+ Ok(self
+ .df_schema_provider
+ .deregister_table(name)
+ .context(error::DatafusionSnafu {
+ msg: "Fail to deregister table from datafusion",
+ })?
+ .map(|table| Arc::new(TableAdapter::new(table, self.runtime.clone())) as _))
+ }
+
+ fn table_exist(&self, name: &str) -> bool {
+ self.df_schema_provider.table_exist(name)
+ }
+}
diff --git a/src/query/src/datafusion/error.rs b/src/query/src/datafusion/error.rs
index 1d89deacd251..ba804e5081dd 100644
--- a/src/query/src/datafusion/error.rs
+++ b/src/query/src/datafusion/error.rs
@@ -22,7 +22,7 @@ pub enum InnerError {
ParseSql { source: sql::errors::ParserError },
#[snafu(display("Cannot plan SQL: {}, source: {}", sql, source))]
- Planner {
+ PlanSql {
sql: String,
source: DataFusionError,
backtrace: Backtrace,
@@ -35,7 +35,7 @@ impl ErrorExt for InnerError {
match self {
ParseSql { source, .. } => source.status_code(),
- Datafusion { .. } | PhysicalPlanDowncast { .. } | Planner { .. } => {
+ Datafusion { .. } | PhysicalPlanDowncast { .. } | PlanSql { .. } => {
StatusCode::Internal
}
}
diff --git a/src/query/src/datafusion/adapter.rs b/src/query/src/datafusion/plan_adapter.rs
similarity index 100%
rename from src/query/src/datafusion/adapter.rs
rename to src/query/src/datafusion/plan_adapter.rs
diff --git a/src/query/src/datafusion/planner.rs b/src/query/src/datafusion/planner.rs
new file mode 100644
index 000000000000..179b1099dab2
--- /dev/null
+++ b/src/query/src/datafusion/planner.rs
@@ -0,0 +1,113 @@
+use std::sync::Arc;
+
+use arrow::datatypes::DataType;
+use datafusion::catalog::TableReference;
+use datafusion::datasource::TableProvider;
+use datafusion::physical_plan::udaf::AggregateUDF;
+use datafusion::physical_plan::udf::ScalarUDF;
+use datafusion::sql::planner::{ContextProvider, SqlToRel};
+use snafu::ResultExt;
+use sql::statements::query::Query;
+use sql::statements::statement::Statement;
+use table::table::adapter::DfTableProviderAdapter;
+
+use crate::{
+ catalog::{self, CatalogListRef},
+ datafusion::error,
+ error::Result,
+ plan::LogicalPlan,
+ planner::Planner,
+};
+
+pub struct DfPlanner<'a, S: ContextProvider> {
+ sql_to_rel: SqlToRel<'a, S>,
+}
+
+impl<'a, S: ContextProvider + Send + Sync> DfPlanner<'a, S> {
+ /// Creates a DataFusion planner instance
+ pub fn new(schema_provider: &'a S) -> Self {
+ let rel = SqlToRel::new(schema_provider);
+ Self { sql_to_rel: rel }
+ }
+
+ /// Converts QUERY statement to logical plan.
+ pub fn query_to_plan(&self, query: Box<Query>) -> Result<LogicalPlan> {
+ // todo(hl): original SQL should be provided as an argument
+ let sql = query.inner.to_string();
+ let result = self
+ .sql_to_rel
+ .query_to_plan(query.inner)
+ .context(error::PlanSqlSnafu { sql })?;
+
+ Ok(LogicalPlan::DfPlan(result))
+ }
+}
+
+impl<'a, S> Planner for DfPlanner<'a, S>
+where
+ S: ContextProvider + Send + Sync,
+{
+ /// Converts statement to logical plan using datafusion planner
+ fn statement_to_plan(&self, statement: Statement) -> Result<LogicalPlan> {
+ match statement {
+ Statement::ShowDatabases(_) => {
+ todo!("Currently not supported")
+ }
+ Statement::Query(qb) => self.query_to_plan(qb),
+ Statement::Insert(_) => {
+ todo!()
+ }
+ }
+ }
+}
+
+pub(crate) struct DfContextProviderAdapter<'a> {
+ catalog_list: &'a CatalogListRef,
+}
+
+impl<'a> DfContextProviderAdapter<'a> {
+ pub(crate) fn new(catalog_list: &'a CatalogListRef) -> Self {
+ Self { catalog_list }
+ }
+}
+
+impl<'a> ContextProvider for DfContextProviderAdapter<'a> {
+ fn get_table_provider(&self, name: TableReference) -> Option<Arc<dyn TableProvider>> {
+ let (catalog, schema, table) = match name {
+ TableReference::Bare { table } => (
+ catalog::DEFAULT_CATALOG_NAME,
+ catalog::DEFAULT_SCHEMA_NAME,
+ table,
+ ),
+ TableReference::Partial { schema, table } => {
+ (catalog::DEFAULT_CATALOG_NAME, schema, table)
+ }
+ TableReference::Full {
+ catalog,
+ schema,
+ table,
+ } => (catalog, schema, table),
+ };
+
+ self.catalog_list
+ .catalog(catalog)
+ .and_then(|catalog_provider| catalog_provider.schema(schema))
+ .and_then(|schema_provider| schema_provider.table(table))
+ .map(|table| Arc::new(DfTableProviderAdapter::new(table)) as _)
+ }
+
+ fn get_function_meta(&self, _name: &str) -> Option<Arc<ScalarUDF>> {
+ // TODO(dennis)
+ None
+ }
+
+ fn get_aggregate_meta(&self, _name: &str) -> Option<Arc<AggregateUDF>> {
+ // TODO(dennis)
+ None
+ }
+
+ fn get_variable_type(&self, _variable_names: &[String]) -> Option<DataType> {
+ // TODO(dennis)
+ None
+ }
+}
diff --git a/src/query/src/planner.rs b/src/query/src/planner.rs
index d00d51a6db7e..0814a037116c 100644
--- a/src/query/src/planner.rs
+++ b/src/query/src/planner.rs
@@ -1,111 +1,8 @@
-use std::sync::Arc;
-
-use arrow::datatypes::DataType;
-use datafusion::catalog::TableReference;
-use datafusion::datasource::TableProvider;
-use datafusion::physical_plan::udaf::AggregateUDF;
-use datafusion::physical_plan::udf::ScalarUDF;
-use datafusion::sql::planner::{ContextProvider, SqlToRel};
-use snafu::ResultExt;
-use sql::statements::query::Query;
use sql::statements::statement::Statement;
-use table::table::adapter::DfTableProviderAdapter;
-use crate::{
- catalog::{CatalogListRef, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME},
- datafusion::error,
- error::Result,
- plan::LogicalPlan,
-};
+use crate::{error::Result, plan::LogicalPlan};
+/// SQL logical planner.
pub trait Planner: Send + Sync {
fn statement_to_plan(&self, statement: Statement) -> Result<LogicalPlan>;
}
-
-pub struct DfPlanner<'a, S: ContextProvider> {
- sql_to_rel: SqlToRel<'a, S>,
-}
-
-impl<'a, S: ContextProvider + Send + Sync> DfPlanner<'a, S> {
- /// Creates a DataFusion planner instance
- pub fn new(schema_provider: &'a S) -> Self {
- let rel = SqlToRel::new(schema_provider);
- Self { sql_to_rel: rel }
- }
-
- /// Converts QUERY statement to logical plan.
- pub fn query_to_plan(&self, query: Box<Query>) -> Result<LogicalPlan> {
- // todo(hl): original SQL should be provided as an argument
- let sql = query.inner.to_string();
- let result = self
- .sql_to_rel
- .query_to_plan(query.inner)
- // FIXME(yingwen): Move DfPlanner to datafusion mod.
- .context(error::PlannerSnafu { sql })?;
-
- Ok(LogicalPlan::DfPlan(result))
- }
-}
-
-impl<'a, S> Planner for DfPlanner<'a, S>
-where
- S: ContextProvider + Send + Sync,
-{
- /// Converts statement to logical plan using datafusion planner
- fn statement_to_plan(&self, statement: Statement) -> Result<LogicalPlan> {
- match statement {
- Statement::ShowDatabases(_) => {
- todo!("Currently not supported")
- }
- Statement::Query(qb) => self.query_to_plan(qb),
- Statement::Insert(_) => {
- todo!()
- }
- }
- }
-}
-
-pub(crate) struct DfContextProviderAdapter<'a> {
- catalog_list: &'a CatalogListRef,
-}
-
-impl<'a> DfContextProviderAdapter<'a> {
- pub(crate) fn new(catalog_list: &'a CatalogListRef) -> Self {
- Self { catalog_list }
- }
-}
-
-impl<'a> ContextProvider for DfContextProviderAdapter<'a> {
- fn get_table_provider(&self, name: TableReference) -> Option<Arc<dyn TableProvider>> {
- let (catalog, schema, table) = match name {
- TableReference::Bare { table } => (DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table),
- TableReference::Partial { schema, table } => (DEFAULT_CATALOG_NAME, schema, table),
- TableReference::Full {
- catalog,
- schema,
- table,
- } => (catalog, schema, table),
- };
-
- self.catalog_list
- .catalog(catalog)
- .and_then(|catalog_provider| catalog_provider.schema(schema))
- .and_then(|schema_provider| schema_provider.table(table))
- .map(|table| Arc::new(DfTableProviderAdapter::new(table)) as _)
- }
-
- fn get_function_meta(&self, _name: &str) -> Option<Arc<ScalarUDF>> {
- // TODO(dennis)
- None
- }
-
- fn get_aggregate_meta(&self, _name: &str) -> Option<Arc<AggregateUDF>> {
- // TODO(dennis)
- None
- }
-
- fn get_variable_type(&self, _variable_names: &[String]) -> Option<DataType> {
- // TODO(dennis)
- None
- }
-}
diff --git a/src/query/src/query_engine/state.rs b/src/query/src/query_engine/state.rs
index f5db6dabedc0..f7301f6a997b 100644
--- a/src/query/src/query_engine/state.rs
+++ b/src/query/src/query_engine/state.rs
@@ -1,24 +1,10 @@
-use std::any::Any;
use std::fmt;
use std::sync::Arc;
-use datafusion::catalog::{
- catalog::{CatalogList as DfCatalogList, CatalogProvider as DfCatalogProvider},
- schema::SchemaProvider as DfSchemaProvider,
-};
-use datafusion::datasource::TableProvider as DfTableProvider;
-use datafusion::error::Result as DataFusionResult;
-use datafusion::execution::runtime_env::RuntimeEnv;
use datafusion::prelude::{ExecutionConfig, ExecutionContext};
-use snafu::ResultExt;
-use table::{
- table::adapter::{DfTableProviderAdapter, TableAdapter},
- Table,
-};
-use crate::catalog::{self, schema::SchemaProvider, CatalogListRef, CatalogProvider};
-use crate::datafusion::error;
-use crate::error::Result;
+use crate::catalog::{self, CatalogListRef};
+use crate::datafusion::DfCatalogListAdapter;
use crate::executor::Runtime;
/// Query engine global state
@@ -46,10 +32,10 @@ impl QueryEngineState {
);
let df_context = ExecutionContext::with_config(config);
- df_context.state.lock().catalog_list = Arc::new(DfCatalogListAdapter {
- catalog_list: catalog_list.clone(),
- runtime: df_context.runtime_env(),
- });
+ df_context.state.lock().catalog_list = Arc::new(DfCatalogListAdapter::new(
+ df_context.runtime_env(),
+ catalog_list.clone(),
+ ));
Self {
df_context,
@@ -72,196 +58,3 @@ impl QueryEngineState {
self.df_context.runtime_env().into()
}
}
-
-/// Adapters between datafusion and greptime query engine.
-struct DfCatalogListAdapter {
- runtime: Arc<RuntimeEnv>,
- catalog_list: CatalogListRef,
-}
-
-impl DfCatalogList for DfCatalogListAdapter {
- fn as_any(&self) -> &dyn Any {
- self
- }
-
- fn register_catalog(
- &self,
- name: String,
- catalog: Arc<dyn DfCatalogProvider>,
- ) -> Option<Arc<dyn DfCatalogProvider>> {
- let catalog_adapter = Arc::new(CatalogProviderAdapter {
- df_cataglog_provider: catalog,
- runtime: self.runtime.clone(),
- });
- self.catalog_list
- .register_catalog(name, catalog_adapter)
- .map(|catalog_provider| {
- Arc::new(DfCatalogProviderAdapter {
- catalog_provider,
- runtime: self.runtime.clone(),
- }) as _
- })
- }
-
- fn catalog_names(&self) -> Vec<String> {
- self.catalog_list.catalog_names()
- }
-
- fn catalog(&self, name: &str) -> Option<Arc<dyn DfCatalogProvider>> {
- self.catalog_list.catalog(name).map(|catalog_provider| {
- Arc::new(DfCatalogProviderAdapter {
- catalog_provider,
- runtime: self.runtime.clone(),
- }) as _
- })
- }
-}
-
-/// Datafusion's CatalogProvider -> greptime CatalogProvider
-struct CatalogProviderAdapter {
- df_cataglog_provider: Arc<dyn DfCatalogProvider>,
- runtime: Arc<RuntimeEnv>,
-}
-
-impl CatalogProvider for CatalogProviderAdapter {
- fn as_any(&self) -> &dyn Any {
- self
- }
-
- fn schema_names(&self) -> Vec<String> {
- self.df_cataglog_provider.schema_names()
- }
-
- fn schema(&self, name: &str) -> Option<Arc<dyn SchemaProvider>> {
- self.df_cataglog_provider
- .schema(name)
- .map(|df_schema_provider| {
- Arc::new(SchemaProviderAdapter {
- df_schema_provider,
- runtime: self.runtime.clone(),
- }) as _
- })
- }
-}
-
-///Greptime CatalogProvider -> datafusion's CatalogProvider
-struct DfCatalogProviderAdapter {
- catalog_provider: Arc<dyn CatalogProvider>,
- runtime: Arc<RuntimeEnv>,
-}
-
-impl DfCatalogProvider for DfCatalogProviderAdapter {
- fn as_any(&self) -> &dyn Any {
- self
- }
-
- fn schema_names(&self) -> Vec<String> {
- self.catalog_provider.schema_names()
- }
-
- fn schema(&self, name: &str) -> Option<Arc<dyn DfSchemaProvider>> {
- self.catalog_provider.schema(name).map(|schema_provider| {
- Arc::new(DfSchemaProviderAdapter {
- schema_provider,
- runtime: self.runtime.clone(),
- }) as _
- })
- }
-}
-
-/// Greptime SchemaProvider -> datafusion SchemaProvider
-struct DfSchemaProviderAdapter {
- schema_provider: Arc<dyn SchemaProvider>,
- runtime: Arc<RuntimeEnv>,
-}
-
-impl DfSchemaProvider for DfSchemaProviderAdapter {
- fn as_any(&self) -> &dyn Any {
- self
- }
-
- fn table_names(&self) -> Vec<String> {
- self.schema_provider.table_names()
- }
-
- fn table(&self, name: &str) -> Option<Arc<dyn DfTableProvider>> {
- self.schema_provider
- .table(name)
- .map(|table| Arc::new(DfTableProviderAdapter::new(table)) as _)
- }
-
- fn register_table(
- &self,
- name: String,
- table: Arc<dyn DfTableProvider>,
- ) -> DataFusionResult<Option<Arc<dyn DfTableProvider>>> {
- let table = Arc::new(TableAdapter::new(table, self.runtime.clone()));
- match self.schema_provider.register_table(name, table)? {
- Some(p) => Ok(Some(Arc::new(DfTableProviderAdapter::new(p)))),
- None => Ok(None),
- }
- }
-
- fn deregister_table(&self, name: &str) -> DataFusionResult<Option<Arc<dyn DfTableProvider>>> {
- match self.schema_provider.deregister_table(name)? {
- Some(p) => Ok(Some(Arc::new(DfTableProviderAdapter::new(p)))),
- None => Ok(None),
- }
- }
-
- fn table_exist(&self, name: &str) -> bool {
- self.schema_provider.table_exist(name)
- }
-}
-
-/// Datafuion SchemaProviderAdapter -> greptime SchemaProviderAdapter
-struct SchemaProviderAdapter {
- df_schema_provider: Arc<dyn DfSchemaProvider>,
- runtime: Arc<RuntimeEnv>,
-}
-
-impl SchemaProvider for SchemaProviderAdapter {
- fn as_any(&self) -> &dyn Any {
- self
- }
-
- /// Retrieves the list of available table names in this schema.
- fn table_names(&self) -> Vec<String> {
- self.df_schema_provider.table_names()
- }
-
- fn table(&self, name: &str) -> Option<Arc<dyn Table>> {
- self.df_schema_provider.table(name).map(|table_provider| {
- Arc::new(TableAdapter::new(table_provider, self.runtime.clone())) as _
- })
- }
-
- fn register_table(
- &self,
- name: String,
- table: Arc<dyn Table>,
- ) -> Result<Option<Arc<dyn Table>>> {
- let table_provider = Arc::new(DfTableProviderAdapter::new(table));
- Ok(self
- .df_schema_provider
- .register_table(name, table_provider)
- .context(error::DatafusionSnafu {
- msg: "Fail to register table to datafusion",
- })?
- .map(|table| (Arc::new(TableAdapter::new(table, self.runtime.clone())) as _)))
- }
-
- fn deregister_table(&self, name: &str) -> Result<Option<Arc<dyn Table>>> {
- Ok(self
- .df_schema_provider
- .deregister_table(name)
- .context(error::DatafusionSnafu {
- msg: "Fail to deregister table from datafusion",
- })?
- .map(|table| Arc::new(TableAdapter::new(table, self.runtime.clone())) as _))
- }
-
- fn table_exist(&self, name: &str) -> bool {
- self.df_schema_provider.table_exist(name)
- }
-}
|
refactor
|
Move planner and adapters to datafusion mod
|
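The refactor above is mostly a code move, but every file it relocates follows the same two-way adapter shape: a thin struct that owns the other side's trait object and forwards each method. A stripped-down sketch of that shape follows, using local stand-in traits rather than the real DataFusion or GreptimeDB interfaces.

```rust
use std::sync::Arc;

// Stand-in traits: placeholders for the provider interfaces adapted in
// catalog_adapter.rs above, not the real APIs.
trait SchemaProvider {
    fn table_names(&self) -> Vec<String>;
}

trait DfSchemaProvider {
    fn table_names(&self) -> Vec<String>;
}

/// greptime -> "DataFusion": wraps a greptime provider so the other engine can use it.
struct DfSchemaProviderAdapter {
    inner: Arc<dyn SchemaProvider>,
}

impl DfSchemaProvider for DfSchemaProviderAdapter {
    fn table_names(&self) -> Vec<String> {
        self.inner.table_names() // pure delegation, no extra logic
    }
}

/// "DataFusion" -> greptime: the reverse direction wraps the other trait object.
struct SchemaProviderAdapter {
    inner: Arc<dyn DfSchemaProvider>,
}

impl SchemaProvider for SchemaProviderAdapter {
    fn table_names(&self) -> Vec<String> {
        self.inner.table_names()
    }
}

fn main() {
    struct Fixed;
    impl SchemaProvider for Fixed {
        fn table_names(&self) -> Vec<String> {
            vec!["demo".to_string()]
        }
    }
    // Round-trip through both adapters; the table list is unchanged.
    let df: Arc<dyn DfSchemaProvider> =
        Arc::new(DfSchemaProviderAdapter { inner: Arc::new(Fixed) });
    let back = SchemaProviderAdapter { inner: df };
    assert_eq!(back.table_names(), vec!["demo".to_string()]);
}
```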
dcc08f6b3ed45813470d3b64f4e02d2c38e870e9
|
2024-10-30 16:42:58
|
dennis zhuang
|
feat: adds the number of rows and index files size to region_statistics table (#4909)
| false
|
diff --git a/src/catalog/src/system_schema/information_schema/region_statistics.rs b/src/catalog/src/system_schema/information_schema/region_statistics.rs
index e92558acd0d6..a98a70cb2347 100644
--- a/src/catalog/src/system_schema/information_schema/region_statistics.rs
+++ b/src/catalog/src/system_schema/information_schema/region_statistics.rs
@@ -39,9 +39,12 @@ use crate::CatalogManager;
const REGION_ID: &str = "region_id";
const TABLE_ID: &str = "table_id";
const REGION_NUMBER: &str = "region_number";
+const REGION_ROWS: &str = "region_rows";
+const DISK_SIZE: &str = "disk_size";
const MEMTABLE_SIZE: &str = "memtable_size";
const MANIFEST_SIZE: &str = "manifest_size";
const SST_SIZE: &str = "sst_size";
+const INDEX_SIZE: &str = "index_size";
const ENGINE: &str = "engine";
const REGION_ROLE: &str = "region_role";
@@ -52,9 +55,12 @@ const INIT_CAPACITY: usize = 42;
/// - `region_id`: The region id.
/// - `table_id`: The table id.
/// - `region_number`: The region number.
+/// - `region_rows`: The number of rows in region.
/// - `memtable_size`: The memtable size in bytes.
+/// - `disk_size`: The approximate disk size in bytes.
/// - `manifest_size`: The manifest size in bytes.
-/// - `sst_size`: The sst size in bytes.
+/// - `sst_size`: The sst data files size in bytes.
+/// - `index_size`: The sst index files size in bytes.
/// - `engine`: The engine type.
/// - `region_role`: The region role.
///
@@ -76,9 +82,12 @@ impl InformationSchemaRegionStatistics {
ColumnSchema::new(REGION_ID, ConcreteDataType::uint64_datatype(), false),
ColumnSchema::new(TABLE_ID, ConcreteDataType::uint32_datatype(), false),
ColumnSchema::new(REGION_NUMBER, ConcreteDataType::uint32_datatype(), false),
+ ColumnSchema::new(REGION_ROWS, ConcreteDataType::uint64_datatype(), true),
+ ColumnSchema::new(DISK_SIZE, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(MEMTABLE_SIZE, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(MANIFEST_SIZE, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(SST_SIZE, ConcreteDataType::uint64_datatype(), true),
+ ColumnSchema::new(INDEX_SIZE, ConcreteDataType::uint64_datatype(), true),
ColumnSchema::new(ENGINE, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(REGION_ROLE, ConcreteDataType::string_datatype(), true),
]))
@@ -135,9 +144,12 @@ struct InformationSchemaRegionStatisticsBuilder {
region_ids: UInt64VectorBuilder,
table_ids: UInt32VectorBuilder,
region_numbers: UInt32VectorBuilder,
+ region_rows: UInt64VectorBuilder,
+ disk_sizes: UInt64VectorBuilder,
memtable_sizes: UInt64VectorBuilder,
manifest_sizes: UInt64VectorBuilder,
sst_sizes: UInt64VectorBuilder,
+ index_sizes: UInt64VectorBuilder,
engines: StringVectorBuilder,
region_roles: StringVectorBuilder,
}
@@ -150,9 +162,12 @@ impl InformationSchemaRegionStatisticsBuilder {
region_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
table_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
region_numbers: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
+ region_rows: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
+ disk_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
memtable_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
manifest_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
sst_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
+ index_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
engines: StringVectorBuilder::with_capacity(INIT_CAPACITY),
region_roles: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
@@ -177,9 +192,12 @@ impl InformationSchemaRegionStatisticsBuilder {
(REGION_ID, &Value::from(region_stat.id.as_u64())),
(TABLE_ID, &Value::from(region_stat.id.table_id())),
(REGION_NUMBER, &Value::from(region_stat.id.region_number())),
+ (REGION_ROWS, &Value::from(region_stat.num_rows)),
+ (DISK_SIZE, &Value::from(region_stat.approximate_bytes)),
(MEMTABLE_SIZE, &Value::from(region_stat.memtable_size)),
(MANIFEST_SIZE, &Value::from(region_stat.manifest_size)),
(SST_SIZE, &Value::from(region_stat.sst_size)),
+ (INDEX_SIZE, &Value::from(region_stat.index_size)),
(ENGINE, &Value::from(region_stat.engine.as_str())),
(REGION_ROLE, &Value::from(region_stat.role.to_string())),
];
@@ -192,9 +210,12 @@ impl InformationSchemaRegionStatisticsBuilder {
self.table_ids.push(Some(region_stat.id.table_id()));
self.region_numbers
.push(Some(region_stat.id.region_number()));
+ self.region_rows.push(Some(region_stat.num_rows));
+ self.disk_sizes.push(Some(region_stat.approximate_bytes));
self.memtable_sizes.push(Some(region_stat.memtable_size));
self.manifest_sizes.push(Some(region_stat.manifest_size));
self.sst_sizes.push(Some(region_stat.sst_size));
+ self.index_sizes.push(Some(region_stat.index_size));
+ self.engines.push(Some(&region_stat.engine));
+ self.region_roles.push(Some(&region_stat.role.to_string()));
}
@@ -204,9 +225,12 @@ impl InformationSchemaRegionStatisticsBuilder {
Arc::new(self.region_ids.finish()),
Arc::new(self.table_ids.finish()),
Arc::new(self.region_numbers.finish()),
+ Arc::new(self.region_rows.finish()),
+ Arc::new(self.disk_sizes.finish()),
Arc::new(self.memtable_sizes.finish()),
Arc::new(self.manifest_sizes.finish()),
Arc::new(self.sst_sizes.finish()),
+ Arc::new(self.index_sizes.finish()),
Arc::new(self.engines.finish()),
Arc::new(self.region_roles.finish()),
];
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 54d6e4d72cc5..251957dd285b 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -736,12 +736,14 @@ impl InformationExtension for StandaloneInformationExtension {
id: stat.region_id,
rcus: 0,
wcus: 0,
- approximate_bytes: region_stat.estimated_disk_size() as i64,
+ approximate_bytes: region_stat.estimated_disk_size(),
engine: stat.engine,
role: RegionRole::from(stat.role).into(),
+ num_rows: region_stat.num_rows,
memtable_size: region_stat.memtable_size,
manifest_size: region_stat.manifest_size,
sst_size: region_stat.sst_size,
+ index_size: region_stat.index_size,
}
})
.collect::<Vec<_>>();
diff --git a/src/common/meta/src/datanode.rs b/src/common/meta/src/datanode.rs
index 4551b8de2fb5..869af96a2828 100644
--- a/src/common/meta/src/datanode.rs
+++ b/src/common/meta/src/datanode.rs
@@ -78,17 +78,21 @@ pub struct RegionStat {
/// The write capacity units during this period
pub wcus: i64,
/// Approximate bytes of this region
- pub approximate_bytes: i64,
+ pub approximate_bytes: u64,
/// The engine name.
pub engine: String,
/// The region role.
pub role: RegionRole,
+ /// The number of rows
+ pub num_rows: u64,
/// The size of the memtable in bytes.
pub memtable_size: u64,
/// The size of the manifest in bytes.
pub manifest_size: u64,
- /// The size of the SST files in bytes.
+ /// The size of the SST data files in bytes.
pub sst_size: u64,
+ /// The size of the SST index files in bytes.
+ pub index_size: u64,
}
impl Stat {
@@ -178,12 +182,14 @@ impl From<&api::v1::meta::RegionStat> for RegionStat {
id: RegionId::from_u64(value.region_id),
rcus: value.rcus,
wcus: value.wcus,
- approximate_bytes: value.approximate_bytes,
+ approximate_bytes: value.approximate_bytes as u64,
engine: value.engine.to_string(),
role: RegionRole::from(value.role()),
+ num_rows: region_stat.num_rows,
memtable_size: region_stat.memtable_size,
manifest_size: region_stat.manifest_size,
sst_size: region_stat.sst_size,
+ index_size: region_stat.index_size,
}
}
}
diff --git a/src/meta-srv/src/handler/failure_handler.rs b/src/meta-srv/src/handler/failure_handler.rs
index 02f423c4b418..ae38f887f4aa 100644
--- a/src/meta-srv/src/handler/failure_handler.rs
+++ b/src/meta-srv/src/handler/failure_handler.rs
@@ -93,9 +93,11 @@ mod tests {
approximate_bytes: 0,
engine: default_engine().to_string(),
role: RegionRole::Follower,
+ num_rows: 0,
memtable_size: 0,
manifest_size: 0,
sst_size: 0,
+ index_size: 0,
}
}
acc.stat = Some(Stat {
diff --git a/src/meta-srv/src/handler/region_lease_handler.rs b/src/meta-srv/src/handler/region_lease_handler.rs
index de491da37150..98a74f67bba7 100644
--- a/src/meta-srv/src/handler/region_lease_handler.rs
+++ b/src/meta-srv/src/handler/region_lease_handler.rs
@@ -135,9 +135,11 @@ mod test {
wcus: 0,
approximate_bytes: 0,
engine: String::new(),
+ num_rows: 0,
memtable_size: 0,
manifest_size: 0,
sst_size: 0,
+ index_size: 0,
}
}
diff --git a/src/meta-srv/src/selector/weight_compute.rs b/src/meta-srv/src/selector/weight_compute.rs
index 09d8833e2e0e..7f3b28a364ea 100644
--- a/src/meta-srv/src/selector/weight_compute.rs
+++ b/src/meta-srv/src/selector/weight_compute.rs
@@ -198,9 +198,11 @@ mod tests {
approximate_bytes: 1,
engine: "mito2".to_string(),
role: RegionRole::Leader,
+ num_rows: 0,
memtable_size: 0,
manifest_size: 0,
sst_size: 0,
+ index_size: 0,
}],
..Default::default()
}
@@ -217,9 +219,11 @@ mod tests {
approximate_bytes: 1,
engine: "mito2".to_string(),
role: RegionRole::Leader,
+ num_rows: 0,
memtable_size: 0,
manifest_size: 0,
sst_size: 0,
+ index_size: 0,
}],
..Default::default()
}
@@ -236,9 +240,11 @@ mod tests {
approximate_bytes: 1,
engine: "mito2".to_string(),
role: RegionRole::Leader,
+ num_rows: 0,
memtable_size: 0,
manifest_size: 0,
sst_size: 0,
+ index_size: 0,
}],
..Default::default()
}
diff --git a/src/mito2/src/engine/basic_test.rs b/src/mito2/src/engine/basic_test.rs
index 533b6a2ea1e1..785c914e3ee5 100644
--- a/src/mito2/src/engine/basic_test.rs
+++ b/src/mito2/src/engine/basic_test.rs
@@ -580,7 +580,8 @@ async fn test_region_usage() {
flush_region(&engine, region_id, None).await;
let region_stat = region.region_statistic();
- assert_eq!(region_stat.sst_size, 3010);
+ assert_eq!(region_stat.sst_size, 2790);
+ assert_eq!(region_stat.num_rows, 10);
// region total usage
// Some memtables may share items.
diff --git a/src/mito2/src/memtable/time_partition.rs b/src/mito2/src/memtable/time_partition.rs
index 6d92488a7b33..7fa03ae1bed4 100644
--- a/src/mito2/src/memtable/time_partition.rs
+++ b/src/mito2/src/memtable/time_partition.rs
@@ -216,6 +216,16 @@ impl TimePartitions {
.sum()
}
+ /// Returns the number of rows.
+ pub(crate) fn num_rows(&self) -> u64 {
+ let inner = self.inner.lock().unwrap();
+ inner
+ .parts
+ .iter()
+ .map(|part| part.memtable.stats().num_rows as u64)
+ .sum()
+ }
+
/// Append memtables in partitions to small vec.
pub(crate) fn list_memtables_to_small_vec(&self, memtables: &mut SmallMemtableVec) {
let inner = self.inner.lock().unwrap();
diff --git a/src/mito2/src/memtable/version.rs b/src/mito2/src/memtable/version.rs
index 9e18edc67345..1c7f2b7d4a25 100644
--- a/src/mito2/src/memtable/version.rs
+++ b/src/mito2/src/memtable/version.rs
@@ -115,6 +115,15 @@ impl MemtableVersion {
.sum()
}
+ /// Returns the number of rows in memtables.
+ pub(crate) fn num_rows(&self) -> u64 {
+ self.immutables
+ .iter()
+ .map(|mem| mem.stats().num_rows as u64)
+ .sum::<u64>()
+ + self.mutable.num_rows()
+ }
+
/// Returns true if the memtable version is empty.
///
/// The version is empty when mutable memtable is empty and there is no
diff --git a/src/mito2/src/region.rs b/src/mito2/src/region.rs
index b05daf3da076..4ce633e6a6c2 100644
--- a/src/mito2/src/region.rs
+++ b/src/mito2/src/region.rs
@@ -277,15 +277,19 @@ impl MitoRegion {
let memtable_usage = (memtables.mutable_usage() + memtables.immutables_usage()) as u64;
let sst_usage = version.ssts.sst_usage();
+ let index_usage = version.ssts.index_usage();
let wal_usage = self.estimated_wal_usage(memtable_usage);
let manifest_usage = self.stats.total_manifest_size();
+ let num_rows = version.ssts.num_rows() + version.memtables.num_rows();
RegionStatistic {
+ num_rows,
memtable_size: memtable_usage,
wal_size: wal_usage,
manifest_size: manifest_usage,
sst_size: sst_usage,
+ index_size: index_usage,
}
}
@@ -422,15 +426,15 @@ impl ManifestContext {
/// Sets the [`RegionRole`].
///
/// ```
- /// +------------------------------------------+
- /// | +-----------------+ |
- /// | | | |
+ /// +------------------------------------------+
+ /// | +-----------------+ |
+ /// | | | |
/// +---+------+ +-------+-----+ +--v-v---+
/// | Follower | | Downgrading | | Leader |
/// +---^-^----+ +-----+-^-----+ +--+-+---+
- /// | | | | | |
- /// | +------------------+ +-----------------+ |
- /// +------------------------------------------+
+ /// | | | | | |
+ /// | +------------------+ +-----------------+ |
+ /// +------------------------------------------+
///
/// Transition:
/// - Follower -> Leader
diff --git a/src/mito2/src/sst/version.rs b/src/mito2/src/sst/version.rs
index 07d6bee9d946..c677a9541344 100644
--- a/src/mito2/src/sst/version.rs
+++ b/src/mito2/src/sst/version.rs
@@ -84,7 +84,25 @@ impl SstVersion {
}
}
- /// Returns SST files'space occupied in current version.
+ /// Returns the number of rows in SST files.
+ /// For historical reasons, the result is not precise for old SST files.
+ pub(crate) fn num_rows(&self) -> u64 {
+ self.levels
+ .iter()
+ .map(|level_meta| {
+ level_meta
+ .files
+ .values()
+ .map(|file_handle| {
+ let meta = file_handle.meta_ref();
+ meta.num_rows
+ })
+ .sum::<u64>()
+ })
+ .sum()
+ }
+
+ /// Returns SST data files' space occupied in current version.
pub(crate) fn sst_usage(&self) -> u64 {
self.levels
.iter()
@@ -94,7 +112,24 @@ impl SstVersion {
.values()
.map(|file_handle| {
let meta = file_handle.meta_ref();
- meta.file_size + meta.index_file_size
+ meta.file_size
+ })
+ .sum::<u64>()
+ })
+ .sum()
+ }
+
+ /// Returns SST index files' space occupied in current version.
+ pub(crate) fn index_usage(&self) -> u64 {
+ self.levels
+ .iter()
+ .map(|level_meta| {
+ level_meta
+ .files
+ .values()
+ .map(|file_handle| {
+ let meta = file_handle.meta_ref();
+ meta.index_file_size
})
.sum::<u64>()
})
diff --git a/src/store-api/src/region_engine.rs b/src/store-api/src/region_engine.rs
index 785e66d37f23..0832385c930b 100644
--- a/src/store-api/src/region_engine.rs
+++ b/src/store-api/src/region_engine.rs
@@ -291,14 +291,20 @@ pub type BatchResponses = Vec<(RegionId, Result<RegionResponse, BoxedError>)>;
/// Represents the statistics of a region.
#[derive(Debug, Deserialize, Serialize, Default)]
pub struct RegionStatistic {
+ /// The number of rows
+ #[serde(default)]
+ pub num_rows: u64,
/// The size of memtable in bytes.
pub memtable_size: u64,
/// The size of WAL in bytes.
pub wal_size: u64,
/// The size of manifest in bytes.
pub manifest_size: u64,
- /// The size of SST files in bytes.
+ /// The size of SST data files in bytes.
pub sst_size: u64,
+ /// The size of SST index files in bytes.
+ #[serde(default)]
+ pub index_size: u64,
}
impl RegionStatistic {
@@ -320,7 +326,7 @@ impl RegionStatistic {
impl RegionStatistic {
/// Returns the estimated disk size of the region.
pub fn estimated_disk_size(&self) -> u64 {
- self.wal_size + self.sst_size + self.manifest_size
+ self.wal_size + self.sst_size + self.manifest_size + self.index_size
}
}
diff --git a/tests/cases/standalone/common/information_schema/region_statistics.result b/tests/cases/standalone/common/information_schema/region_statistics.result
new file mode 100644
index 000000000000..0c62b4ad6cd0
--- /dev/null
+++ b/tests/cases/standalone/common/information_schema/region_statistics.result
@@ -0,0 +1,39 @@
+USE public;
+
+Affected Rows: 0
+
+CREATE TABLE test (
+ a int primary key,
+ b string,
+ ts timestamp time index,
+) PARTITION ON COLUMNS (a) (
+ a < 10,
+ a >= 10 AND a < 20,
+ a >= 20,
+);
+
+Affected Rows: 0
+
+INSERT INTO test VALUES
+ (1, 'a', 1),
+ (11, 'b', 11),
+ (21, 'c', 21);
+
+Affected Rows: 3
+
+-- SQLNESS SLEEP 11s
+-- FIXME(dennis): we need to wait for the datanode to report stats info to metasrv.
+SELECT SUM(region_rows), SUM(disk_size), SUM(sst_size), SUM(index_size)
+ FROM INFORMATION_SCHEMA.REGION_STATISTICS WHERE table_id
+ IN (SELECT TABLE_ID FROM INFORMATION_SCHEMA.TABLES WHERE table_name = 'test' and table_schema = 'public');
+
++-------------------------------------------------------+-----------------------------------------------------+----------------------------------------------------+------------------------------------------------------+
+| SUM(information_schema.region_statistics.region_rows) | SUM(information_schema.region_statistics.disk_size) | SUM(information_schema.region_statistics.sst_size) | SUM(information_schema.region_statistics.index_size) |
++-------------------------------------------------------+-----------------------------------------------------+----------------------------------------------------+------------------------------------------------------+
+| 3 | 2145 | 0 | 0 |
++-------------------------------------------------------+-----------------------------------------------------+----------------------------------------------------+------------------------------------------------------+
+
+DROP TABLE test;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/information_schema/region_statistics.sql b/tests/cases/standalone/common/information_schema/region_statistics.sql
new file mode 100644
index 000000000000..cbc4424683a6
--- /dev/null
+++ b/tests/cases/standalone/common/information_schema/region_statistics.sql
@@ -0,0 +1,25 @@
+USE public;
+
+CREATE TABLE test (
+ a int primary key,
+ b string,
+ ts timestamp time index,
+) PARTITION ON COLUMNS (a) (
+ a < 10,
+ a >= 10 AND a < 20,
+ a >= 20,
+);
+
+
+INSERT INTO test VALUES
+ (1, 'a', 1),
+ (11, 'b', 11),
+ (21, 'c', 21);
+
+-- SQLNESS SLEEP 11s
+-- FIXME(dennis): we need to wait for the datanode to report stats info to metasrv.
+SELECT SUM(region_rows), SUM(disk_size), SUM(sst_size), SUM(index_size)
+ FROM INFORMATION_SCHEMA.REGION_STATISTICS WHERE table_id
+ IN (SELECT TABLE_ID FROM INFORMATION_SCHEMA.TABLES WHERE table_name = 'test' and table_schema = 'public');
+
+DROP TABLE test;
diff --git a/tests/cases/standalone/common/system/information_schema.result b/tests/cases/standalone/common/system/information_schema.result
index 4264bd8df66e..b1c8c9329514 100644
--- a/tests/cases/standalone/common/system/information_schema.result
+++ b/tests/cases/standalone/common/system/information_schema.result
@@ -299,13 +299,16 @@ select * from information_schema.columns order by table_schema, table_name, colu
| greptime | information_schema | region_peers | peer_id | 2 | | | 20 | 0 | | | | | | select,insert | | UInt64 | bigint unsigned | FIELD | | Yes | bigint unsigned | | |
| greptime | information_schema | region_peers | region_id | 1 | | | 20 | 0 | | | | | | select,insert | | UInt64 | bigint unsigned | FIELD | | No | bigint unsigned | | |
| greptime | information_schema | region_peers | status | 5 | 2147483647 | 2147483647 | | | | utf8 | utf8_bin | | | select,insert | | String | string | FIELD | | Yes | string | | |
-| greptime | information_schema | region_statistics | engine | 7 | 2147483647 | 2147483647 | | | | utf8 | utf8_bin | | | select,insert | | String | string | FIELD | | Yes | string | | |
-| greptime | information_schema | region_statistics | manifest_size | 5 | | | 20 | 0 | | | | | | select,insert | | UInt64 | bigint unsigned | FIELD | | Yes | bigint unsigned | | |
-| greptime | information_schema | region_statistics | memtable_size | 4 | | | 20 | 0 | | | | | | select,insert | | UInt64 | bigint unsigned | FIELD | | Yes | bigint unsigned | | |
+| greptime | information_schema | region_statistics | disk_size | 5 | | | 20 | 0 | | | | | | select,insert | | UInt64 | bigint unsigned | FIELD | | Yes | bigint unsigned | | |
+| greptime | information_schema | region_statistics | engine | 10 | 2147483647 | 2147483647 | | | | utf8 | utf8_bin | | | select,insert | | String | string | FIELD | | Yes | string | | |
+| greptime | information_schema | region_statistics | index_size | 9 | | | 20 | 0 | | | | | | select,insert | | UInt64 | bigint unsigned | FIELD | | Yes | bigint unsigned | | |
+| greptime | information_schema | region_statistics | manifest_size | 7 | | | 20 | 0 | | | | | | select,insert | | UInt64 | bigint unsigned | FIELD | | Yes | bigint unsigned | | |
+| greptime | information_schema | region_statistics | memtable_size | 6 | | | 20 | 0 | | | | | | select,insert | | UInt64 | bigint unsigned | FIELD | | Yes | bigint unsigned | | |
| greptime | information_schema | region_statistics | region_id | 1 | | | 20 | 0 | | | | | | select,insert | | UInt64 | bigint unsigned | FIELD | | No | bigint unsigned | | |
| greptime | information_schema | region_statistics | region_number | 3 | | | 10 | 0 | | | | | | select,insert | | UInt32 | int unsigned | FIELD | | No | int unsigned | | |
-| greptime | information_schema | region_statistics | region_role | 8 | 2147483647 | 2147483647 | | | | utf8 | utf8_bin | | | select,insert | | String | string | FIELD | | Yes | string | | |
-| greptime | information_schema | region_statistics | sst_size | 6 | | | 20 | 0 | | | | | | select,insert | | UInt64 | bigint unsigned | FIELD | | Yes | bigint unsigned | | |
+| greptime | information_schema | region_statistics | region_role | 11 | 2147483647 | 2147483647 | | | | utf8 | utf8_bin | | | select,insert | | String | string | FIELD | | Yes | string | | |
+| greptime | information_schema | region_statistics | region_rows | 4 | | | 20 | 0 | | | | | | select,insert | | UInt64 | bigint unsigned | FIELD | | Yes | bigint unsigned | | |
+| greptime | information_schema | region_statistics | sst_size | 8 | | | 20 | 0 | | | | | | select,insert | | UInt64 | bigint unsigned | FIELD | | Yes | bigint unsigned | | |
| greptime | information_schema | region_statistics | table_id | 2 | | | 10 | 0 | | | | | | select,insert | | UInt32 | int unsigned | FIELD | | No | int unsigned | | |
| greptime | information_schema | routines | character_maximum_length | 7 | | | 19 | 0 | | | | | | select,insert | | Int64 | bigint | FIELD | | No | bigint | | |
| greptime | information_schema | routines | character_octet_length | 8 | | | 19 | 0 | | | | | | select,insert | | Int64 | bigint | FIELD | | No | bigint | | |
diff --git a/tests/conf/datanode-test.toml.template b/tests/conf/datanode-test.toml.template
index 5ed5352124df..3c999635d91c 100644
--- a/tests/conf/datanode-test.toml.template
+++ b/tests/conf/datanode-test.toml.template
@@ -32,3 +32,6 @@ tcp_nodelay = false
[procedure]
max_retry_times = 3
retry_delay = "500ms"
+
+[heartbeat]
+interval = '1s'
|
feat
|
adds the number of rows and index files size to region_statistics table (#4909)
|
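The new `num_rows` and `index_size` fields on `RegionStatistic` are marked `#[serde(default)]`, so statistics serialized by older nodes, which lack those keys, still deserialize cleanly with zero values. A minimal sketch of that behaviour, assuming `serde` and `serde_json` as dependencies and using a local copy of the struct rather than the store-api type itself:

```rust
use serde::Deserialize;

// Local copy of the struct shape from the diff above, for illustration only.
#[derive(Debug, Deserialize, Default)]
struct RegionStatistic {
    #[serde(default)]
    num_rows: u64,
    memtable_size: u64,
    wal_size: u64,
    manifest_size: u64,
    sst_size: u64,
    #[serde(default)]
    index_size: u64,
}

fn main() {
    // An "old" payload produced before this commit, without the new fields.
    let old = r#"{"memtable_size":1,"wal_size":2,"manifest_size":3,"sst_size":4}"#;
    let stat: RegionStatistic = serde_json::from_str(old).unwrap();
    assert_eq!(stat.num_rows, 0);
    assert_eq!(stat.index_size, 0);
    // Mirroring the diff, the estimated disk size now also counts index files.
    let disk = stat.wal_size + stat.sst_size + stat.manifest_size + stat.index_size;
    assert_eq!(disk, 9);
}
```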
d6d46378a1b1bd34b99683d3f94d9a585ddfe91a
|
2023-09-18 16:22:14
|
Zhenchi
|
test: fix some integration tests (#2432)
| false
|
diff --git a/tests-integration/src/grpc.rs b/tests-integration/src/grpc.rs
index 3e01cfb339b0..4bb7b2be8e41 100644
--- a/tests-integration/src/grpc.rs
+++ b/tests-integration/src/grpc.rs
@@ -347,7 +347,7 @@ CREATE TABLE {table_name} (
.collect(),
..Default::default()
}),
- semantic_type: SemanticType::Field as i32,
+ semantic_type: SemanticType::Tag as i32,
datatype: ColumnDataType::String as i32,
..Default::default()
},
@@ -421,7 +421,7 @@ CREATE TABLE {table_name} (
},
Column {
column_name: "b".to_string(),
- semantic_type: SemanticType::Field as i32,
+ semantic_type: SemanticType::Tag as i32,
values: Some(Values {
string_values: b,
..Default::default()
diff --git a/tests-integration/src/tests/instance_test.rs b/tests-integration/src/tests/instance_test.rs
index 543d78290dd7..b83c0fe55a3a 100644
--- a/tests-integration/src/tests/instance_test.rs
+++ b/tests-integration/src/tests/instance_test.rs
@@ -23,6 +23,7 @@ use common_test_util::temp_dir;
use datatypes::vectors::{StringVector, TimestampMillisecondVector, UInt64Vector, VectorRef};
use frontend::error::{Error, Result};
use frontend::instance::Instance;
+use operator::error::Error as OperatorError;
use rstest::rstest;
use rstest_reuse::apply;
use servers::query_handler::sql::SqlQueryHandler;
@@ -375,20 +376,28 @@ async fn test_execute_insert_by_select(instance: Arc<dyn MockInstance>) {
try_execute_sql(&instance, "insert into demo2(host) select * from demo1")
.await
.unwrap_err(),
- Error::PlanStatement { .. }
+ Error::TableOperation {
+ source: OperatorError::PlanStatement { .. },
+ ..
+ }
));
assert!(matches!(
try_execute_sql(&instance, "insert into demo2 select cpu,memory from demo1")
.await
.unwrap_err(),
- Error::PlanStatement { .. }
+ Error::TableOperation {
+ source: OperatorError::PlanStatement { .. },
+ ..
+ }
));
-
assert!(matches!(
try_execute_sql(&instance, "insert into demo2(ts) select memory from demo1")
.await
.unwrap_err(),
- Error::PlanStatement { .. }
+ Error::TableOperation {
+ source: OperatorError::PlanStatement { .. },
+ ..
+ }
));
let output = execute_sql(&instance, "insert into demo2 select * from demo1").await;
@@ -705,6 +714,8 @@ async fn test_execute_query_external_table_parquet(instance: Arc<dyn MockInstanc
#[apply(both_instances_cases)]
async fn test_execute_query_external_table_orc(instance: Arc<dyn MockInstance>) {
+ std::env::set_var("TZ", "UTC");
+
let instance = instance.frontend();
let format = "orc";
let location = find_testing_resource("/src/common/datasource/tests/orc/test.orc");
@@ -781,6 +792,8 @@ async fn test_execute_query_external_table_orc(instance: Arc<dyn MockInstance>)
#[apply(both_instances_cases)]
async fn test_execute_query_external_table_orc_with_schema(instance: Arc<dyn MockInstance>) {
+ std::env::set_var("TZ", "UTC");
+
let instance = instance.frontend();
let format = "orc";
let location = find_testing_resource("/src/common/datasource/tests/orc/test.orc");
@@ -830,6 +843,8 @@ async fn test_execute_query_external_table_orc_with_schema(instance: Arc<dyn Moc
#[apply(both_instances_cases)]
async fn test_execute_query_external_table_csv(instance: Arc<dyn MockInstance>) {
+ std::env::set_var("TZ", "UTC");
+
let instance = instance.frontend();
let format = "csv";
let location = find_testing_resource("/tests/data/csv/various_type.csv");
@@ -877,6 +892,8 @@ async fn test_execute_query_external_table_csv(instance: Arc<dyn MockInstance>)
#[apply(both_instances_cases)]
async fn test_execute_query_external_table_json(instance: Arc<dyn MockInstance>) {
+ std::env::set_var("TZ", "UTC");
+
let instance = instance.frontend();
let format = "json";
let location = find_testing_resource("/tests/data/json/various_type.json");
@@ -931,6 +948,8 @@ async fn test_execute_query_external_table_json(instance: Arc<dyn MockInstance>)
#[apply(both_instances_cases)]
async fn test_execute_query_external_table_json_with_schema(instance: Arc<dyn MockInstance>) {
+ std::env::set_var("TZ", "UTC");
+
let instance = instance.frontend();
let format = "json";
let location = find_testing_resource("/tests/data/json/various_type.json");
@@ -993,6 +1012,8 @@ async fn test_execute_query_external_table_json_with_schema(instance: Arc<dyn Mo
#[apply(both_instances_cases)]
async fn test_execute_query_external_table_json_type_cast(instance: Arc<dyn MockInstance>) {
+ std::env::set_var("TZ", "UTC");
+
let instance = instance.frontend();
let format = "json";
let location = find_testing_resource("/tests/data/json/type_cast.json");
@@ -1059,6 +1080,8 @@ async fn test_execute_query_external_table_json_type_cast(instance: Arc<dyn Mock
#[apply(both_instances_cases)]
async fn test_execute_query_external_table_json_default_ts_column(instance: Arc<dyn MockInstance>) {
+ std::env::set_var("TZ", "UTC");
+
let instance = instance.frontend();
let format = "json";
let location = find_testing_resource("/tests/data/json/default_ts_column.json");
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 0f97bca5a030..e5b40c026bb1 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -605,15 +605,17 @@ pub async fn test_config_api(store_type: StorageType) {
assert_eq!(res_get.status(), StatusCode::OK);
let expected_toml_str = format!(
r#"mode = "standalone"
+node_id = 0
+coordination = false
rpc_addr = "127.0.0.1:3001"
rpc_runtime_size = 8
enable_telemetry = true
[heartbeat]
-interval_millis = 5000
-retry_interval_millis = 5000
+interval_millis = 3000
+retry_interval_millis = 3000
-[http_opts]
+[http]
addr = "127.0.0.1:4000"
timeout = "30s"
body_limit = "64MiB"
@@ -658,6 +660,10 @@ auto_flush_interval = "30m"
global_write_buffer_size = "1GiB"
global_write_buffer_reject_size = "2GiB"
+[[region_engine]]
+
+[region_engine.file]
+
[logging]
enable_jaeger_tracing = false"#,
store_type
|
test
|
fix some integration tests (#2432)
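
The updated assertions above reflect that plan errors are now surfaced through a wrapping TableOperation variant whose source is the operator error. A minimal sketch of that nested matches! pattern, using stand-in enums rather than the real frontend and operator error types:

// Stand-in error enums for illustration only; not the real frontend/operator errors.
#[allow(dead_code)]
#[derive(Debug)]
enum OperatorError {
    PlanStatement { msg: String },
    Other,
}

#[allow(dead_code)]
#[derive(Debug)]
enum FrontendError {
    TableOperation { source: OperatorError },
    Other,
}

fn main() {
    let err = FrontendError::TableOperation {
        source: OperatorError::PlanStatement {
            msg: "cannot plan insert-by-select".to_string(),
        },
    };
    // Same shape as the updated assertions: match the wrapper, then its source.
    assert!(matches!(
        err,
        FrontendError::TableOperation {
            source: OperatorError::PlanStatement { .. },
        }
    ));
}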
|
1272bc9afc3111d26f799f343a03f395253a1f99
|
2024-04-24 15:08:03
|
Ruihang Xia
|
fix: post process result on query full column name of prom labels API (#3793)
| false
|
diff --git a/src/servers/src/http/prometheus.rs b/src/servers/src/http/prometheus.rs
index b02b9b8bd70d..4f453a1ea43b 100644
--- a/src/servers/src/http/prometheus.rs
+++ b/src/servers/src/http/prometheus.rs
@@ -21,7 +21,6 @@ use catalog::CatalogManagerRef;
use common_catalog::parse_catalog_and_schema_from_db_string;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
-use common_query::prelude::{GREPTIME_TIMESTAMP, GREPTIME_VALUE};
use common_query::{Output, OutputData};
use common_recordbatch::RecordBatches;
use common_telemetry::tracing;
@@ -312,17 +311,26 @@ pub async fn labels_query(
if queries.is_empty() {
queries = form_params.matches.0;
}
- if queries.is_empty() {
- match get_all_column_names(&catalog, &schema, &handler.catalog_manager()).await {
- Ok(labels) => {
- return PrometheusJsonResponse::success(PrometheusResponse::Labels(labels))
- }
- Err(e) => {
- return PrometheusJsonResponse::error(e.status_code().to_string(), e.output_msg())
- }
+
+ // Fetch all tag columns. It will be used as white-list for tag names.
+ let mut labels = match get_all_column_names(&catalog, &schema, &handler.catalog_manager()).await
+ {
+ Ok(labels) => labels,
+ Err(e) => {
+ return PrometheusJsonResponse::error(e.status_code().to_string(), e.output_msg())
}
+ };
+ // insert the special metric name label
+ let _ = labels.insert(METRIC_NAME.to_string());
+
+ // Fetch all columns if no query matcher is provided
+ if queries.is_empty() {
+ let mut labels_vec = labels.into_iter().collect::<Vec<_>>();
+ labels_vec.sort_unstable();
+ return PrometheusJsonResponse::success(PrometheusResponse::Labels(labels_vec));
}
+ // Otherwise, run queries and extract column name from result set.
let start = params
.start
.or(form_params.start)
@@ -331,14 +339,13 @@ pub async fn labels_query(
.end
.or(form_params.end)
.unwrap_or_else(current_time_rfc3339);
-
let lookback = params
.lookback
.or(form_params.lookback)
.unwrap_or_else(|| DEFAULT_LOOKBACK_STRING.to_string());
- let mut labels = HashSet::new();
- let _ = labels.insert(METRIC_NAME.to_string());
+ let mut fetched_labels = HashSet::new();
+ let _ = fetched_labels.insert(METRIC_NAME.to_string());
let mut merge_map = HashMap::new();
for query in queries {
@@ -352,7 +359,8 @@ pub async fn labels_query(
let result = handler.do_query(&prom_query, query_ctx.clone()).await;
if let Err(err) =
- retrieve_labels_name_from_query_result(result, &mut labels, &mut merge_map).await
+ retrieve_labels_name_from_query_result(result, &mut fetched_labels, &mut merge_map)
+ .await
{
// Prometheus won't report error if querying nonexist label and metric
if err.status_code() != StatusCode::TableNotFound
@@ -366,10 +374,11 @@ pub async fn labels_query(
}
}
- let _ = labels.remove(GREPTIME_TIMESTAMP);
- let _ = labels.remove(GREPTIME_VALUE);
+ // intersect `fetched_labels` with `labels` to filter out non-tag columns
+ fetched_labels.retain(|l| labels.contains(l));
+ let _ = labels.insert(METRIC_NAME.to_string());
- let mut sorted_labels: Vec<String> = labels.into_iter().collect();
+ let mut sorted_labels: Vec<String> = fetched_labels.into_iter().collect();
sorted_labels.sort();
let merge_map = merge_map
.into_iter()
@@ -380,11 +389,12 @@ pub async fn labels_query(
resp
}
+/// Get all tag column name of the given schema
async fn get_all_column_names(
catalog: &str,
schema: &str,
manager: &CatalogManagerRef,
-) -> std::result::Result<Vec<String>, catalog::error::Error> {
+) -> std::result::Result<HashSet<String>, catalog::error::Error> {
let table_names = manager.table_names(catalog, schema).await?;
let mut labels = HashSet::new();
@@ -392,15 +402,12 @@ async fn get_all_column_names(
let Some(table) = manager.table(catalog, schema, &table_name).await? else {
continue;
};
- let schema = table.schema();
- for column in schema.column_schemas() {
- labels.insert(column.name.to_string());
+ for column in table.primary_key_columns() {
+ labels.insert(column.name);
}
}
- let mut labels_vec = labels.into_iter().collect::<Vec<_>>();
- labels_vec.sort_unstable();
- Ok(labels_vec)
+ Ok(labels)
}
async fn retrieve_series_from_query_result(
diff --git a/src/table/src/table.rs b/src/table/src/table.rs
index a0a45a07395f..44406c24b239 100644
--- a/src/table/src/table.rs
+++ b/src/table/src/table.rs
@@ -16,7 +16,7 @@ use std::sync::Arc;
use common_query::logical_plan::Expr;
use common_recordbatch::SendableRecordBatchStream;
-use datatypes::schema::SchemaRef;
+use datatypes::schema::{ColumnSchema, SchemaRef};
use snafu::ResultExt;
use store_api::data_source::DataSourceRef;
use store_api::storage::ScanRequest;
@@ -81,4 +81,13 @@ impl Table {
pub fn supports_filters_pushdown(&self, filters: &[&Expr]) -> Result<Vec<FilterPushDownType>> {
Ok(vec![self.filter_pushdown; filters.len()])
}
+
+ /// Get primary key columns in the definition order.
+ pub fn primary_key_columns(&self) -> impl Iterator<Item = ColumnSchema> + '_ {
+ self.table_info
+ .meta
+ .primary_key_indices
+ .iter()
+ .map(|i| self.table_info.meta.schema.column_schemas()[*i].clone())
+ }
}
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 7a3a1a43c55c..d269859b869a 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -463,15 +463,19 @@ pub async fn test_prom_http_api(store_type: StorageType) {
assert_eq!(body.status, "success");
assert_eq!(
body.data,
- serde_json::from_value::<PrometheusResponse>(json!([
- "__name__", "cpu", "host", "memory", "ts"
- ]))
- .unwrap()
+ serde_json::from_value::<PrometheusResponse>(json!(["__name__", "host",])).unwrap()
);
// labels without match[] param
let res = client.get("/v1/prometheus/api/v1/labels").send().await;
assert_eq!(res.status(), StatusCode::OK);
+ let body = serde_json::from_str::<PrometheusJsonResponse>(&res.text().await).unwrap();
+ assert_eq!(body.status, "success");
+ assert_eq!(
+ body.data,
+ serde_json::from_value::<PrometheusResponse>(json!(["__name__", "host", "number",]))
+ .unwrap()
+ );
// labels query with multiple match[] params
let res = client
|
fix
|
post process result on query full column name of prom labels API (#3793)
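
The fix above builds a whitelist from the tag (primary key) columns plus __name__ and intersects the labels gathered from query results with it, instead of returning every schema column. A minimal sketch of that retain-based intersection, with hard-coded label names standing in for the catalog and query lookups:

use std::collections::HashSet;

fn main() {
    // Stand-ins for the tag columns returned by get_all_column_names plus __name__.
    let tag_columns: HashSet<String> =
        ["host", "__name__"].into_iter().map(String::from).collect();

    // Stand-ins for label names extracted from query result sets.
    let mut fetched_labels: HashSet<String> =
        ["host", "cpu", "memory", "ts", "__name__"]
            .into_iter()
            .map(String::from)
            .collect();

    // Same idea as `fetched_labels.retain(|l| labels.contains(l))` in prometheus.rs:
    // only tag columns survive, so value and timestamp columns drop out.
    fetched_labels.retain(|l| tag_columns.contains(l));

    let mut sorted: Vec<String> = fetched_labels.into_iter().collect();
    sorted.sort();
    assert_eq!(sorted, vec!["__name__".to_string(), "host".to_string()]);
}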
|
91820a8006fa2a1aaed00314c26293691583bed2
|
2023-11-17 16:48:14
|
WU Jingdi
|
fix: empty by in range query (#2770)
| false
|
diff --git a/src/query/src/range_select/plan_rewrite.rs b/src/query/src/range_select/plan_rewrite.rs
index ca7689af20b7..fb1872c37580 100644
--- a/src/query/src/range_select/plan_rewrite.rs
+++ b/src/query/src/range_select/plan_rewrite.rs
@@ -342,6 +342,12 @@ impl RangePlanRewriter {
.row_key_column_names()
.map(|key| Expr::Column(Column::new(Some(table_ref.clone()), key)))
.collect();
+ // If the user does not specify a primary key when creating a table,
+ // then by default all data will be aggregated into one time series,
+ // which is equivalent to using `by(1)` in SQL
+ if default_by.is_empty() {
+ default_by = vec![Expr::Literal(ScalarValue::Int64(Some(1)))];
+ }
time_index_expr = Expr::Column(Column::new(
Some(table_ref.clone()),
time_index_column.name.clone(),
diff --git a/tests/cases/standalone/common/range/by.result b/tests/cases/standalone/common/range/by.result
index d3f1d7c77810..3c1d83d0bc37 100644
--- a/tests/cases/standalone/common/range/by.result
+++ b/tests/cases/standalone/common/range/by.result
@@ -69,3 +69,39 @@ DROP TABLE host;
Affected Rows: 0
+-- Test no primary key and by keyword
+CREATE TABLE host (
+ ts timestamp(3) time index,
+ host STRING,
+ val BIGINT,
+);
+
+Affected Rows: 0
+
+INSERT INTO TABLE host VALUES
+ (0, 'host1', 0),
+ (5000, 'host1', null),
+ (10000, 'host1', 1),
+ (15000, 'host1', null),
+ (20000, 'host1', 2),
+ (0, 'host2', 3),
+ (5000, 'host2', null),
+ (10000, 'host2', 4),
+ (15000, 'host2', null),
+ (20000, 'host2', 5);
+
+Affected Rows: 10
+
+SELECT ts, max(val) RANGE '5s' FROM host ALIGN '20s' ORDER BY ts;
+
++---------------------+----------------------------------+
+| ts | MAX(host.val) RANGE 5s FILL NULL |
++---------------------+----------------------------------+
+| 1970-01-01T00:00:00 | 3 |
+| 1970-01-01T00:00:20 | 5 |
++---------------------+----------------------------------+
+
+DROP TABLE host;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/range/by.sql b/tests/cases/standalone/common/range/by.sql
index 1ee99924cc69..b7aae24c97e9 100644
--- a/tests/cases/standalone/common/range/by.sql
+++ b/tests/cases/standalone/common/range/by.sql
@@ -34,3 +34,27 @@ SELECT ts, CAST(length(host) as INT64) + 2, max(val) RANGE '5s' FROM host ALIGN
SELECT ts, host, max(val) RANGE '5s' FROM host ALIGN '20s' BY () ORDER BY ts;
DROP TABLE host;
+
+-- Test no primary key and by keyword
+
+CREATE TABLE host (
+ ts timestamp(3) time index,
+ host STRING,
+ val BIGINT,
+);
+
+INSERT INTO TABLE host VALUES
+ (0, 'host1', 0),
+ (5000, 'host1', null),
+ (10000, 'host1', 1),
+ (15000, 'host1', null),
+ (20000, 'host1', 2),
+ (0, 'host2', 3),
+ (5000, 'host2', null),
+ (10000, 'host2', 4),
+ (15000, 'host2', null),
+ (20000, 'host2', 5);
+
+SELECT ts, max(val) RANGE '5s' FROM host ALIGN '20s' ORDER BY ts;
+
+DROP TABLE host;
diff --git a/tests/cases/standalone/common/range/error.result b/tests/cases/standalone/common/range/error.result
index cf720ee9f1e3..eeead0c8b2d8 100644
--- a/tests/cases/standalone/common/range/error.result
+++ b/tests/cases/standalone/common/range/error.result
@@ -35,6 +35,10 @@ SELECT min(val) FROM host ALIGN '5s';
Error: 2000(InvalidSyntax), sql parser error: Illegal Range select, no RANGE keyword found in any SelectItem
+SELECT 1 FROM host ALIGN '5s';
+
+Error: 2000(InvalidSyntax), sql parser error: Illegal Range select, no RANGE keyword found in any SelectItem
+
SELECT min(val) RANGE '10s', max(val) FROM host ALIGN '5s';
Error: 3001(EngineExecuteQuery), No field named "MAX(host.val)". Valid fields are "MIN(host.val) RANGE 10s FILL NULL", host.ts, host.host.
diff --git a/tests/cases/standalone/common/range/error.sql b/tests/cases/standalone/common/range/error.sql
index 19cf55d2285e..86ceda4ea1ab 100644
--- a/tests/cases/standalone/common/range/error.sql
+++ b/tests/cases/standalone/common/range/error.sql
@@ -28,6 +28,8 @@ SELECT min(val) RANGE '5s' FROM host ALIGN 'not_time';
SELECT min(val) FROM host ALIGN '5s';
+SELECT 1 FROM host ALIGN '5s';
+
SELECT min(val) RANGE '10s', max(val) FROM host ALIGN '5s';
SELECT min(val) * 2 RANGE '10s' FROM host ALIGN '5s';
|
fix
|
empty by in range query (#2770)
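
The planner change above substitutes a constant grouping expression when a table has no primary key, so every row lands in a single time series, the same as writing BY (1). A minimal sketch of that fallback with a stand-in Expr type rather than DataFusion's:

// A stand-in expression type; the real code builds DataFusion `Expr` values.
#[derive(Debug, PartialEq)]
enum Expr {
    Column(String),
    Literal(i64),
}

fn default_by_exprs(tag_columns: &[&str]) -> Vec<Expr> {
    let mut by: Vec<Expr> = tag_columns
        .iter()
        .map(|c| Expr::Column((*c).to_string()))
        .collect();
    if by.is_empty() {
        // No primary key means no tag columns, so group everything into one
        // series, equivalent to writing `BY (1)`.
        by = vec![Expr::Literal(1)];
    }
    by
}

fn main() {
    assert_eq!(
        default_by_exprs(&["host"]),
        vec![Expr::Column("host".to_string())]
    );
    assert_eq!(default_by_exprs(&[]), vec![Expr::Literal(1)]);
}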
|
a9f21915efacacd7ac10d76bf2caa342776f2490
|
2024-12-25 20:00:07
|
Zhenchi
|
feat(bloom-filter): integrate indexer with mito2 (#5236)
| false
|
diff --git a/src/datatypes/src/schema.rs b/src/datatypes/src/schema.rs
index c537a4608b42..19f3c6e55fb1 100644
--- a/src/datatypes/src/schema.rs
+++ b/src/datatypes/src/schema.rs
@@ -29,7 +29,7 @@ use crate::error::{self, DuplicateColumnSnafu, Error, ProjectArrowSchemaSnafu, R
use crate::prelude::ConcreteDataType;
pub use crate::schema::column_schema::{
ColumnSchema, FulltextAnalyzer, FulltextOptions, Metadata, SkippingIndexOptions,
- COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE, COLUMN_FULLTEXT_OPT_KEY_ANALYZER,
+ SkippingIndexType, COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE, COLUMN_FULLTEXT_OPT_KEY_ANALYZER,
COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE, COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY,
COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY,
SKIPPING_INDEX_KEY, TIME_INDEX_KEY,
diff --git a/src/datatypes/src/schema/column_schema.rs b/src/datatypes/src/schema/column_schema.rs
index 7a96ab5e2bf2..74e066adc7b4 100644
--- a/src/datatypes/src/schema/column_schema.rs
+++ b/src/datatypes/src/schema/column_schema.rs
@@ -543,7 +543,7 @@ pub struct SkippingIndexOptions {
pub granularity: u32,
/// The type of the skip index.
#[serde(default)]
- pub index_type: SkipIndexType,
+ pub index_type: SkippingIndexType,
}
impl fmt::Display for SkippingIndexOptions {
@@ -556,15 +556,15 @@ impl fmt::Display for SkippingIndexOptions {
/// Skip index types.
#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize, Visit, VisitMut)]
-pub enum SkipIndexType {
+pub enum SkippingIndexType {
#[default]
BloomFilter,
}
-impl fmt::Display for SkipIndexType {
+impl fmt::Display for SkippingIndexType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
- SkipIndexType::BloomFilter => write!(f, "BLOOM"),
+ SkippingIndexType::BloomFilter => write!(f, "BLOOM"),
}
}
}
@@ -587,7 +587,7 @@ impl TryFrom<HashMap<String, String>> for SkippingIndexOptions {
// Parse index type with default value BloomFilter
let index_type = match options.get(COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE) {
Some(typ) => match typ.to_ascii_uppercase().as_str() {
- "BLOOM" => SkipIndexType::BloomFilter,
+ "BLOOM" => SkippingIndexType::BloomFilter,
_ => {
return error::InvalidSkippingIndexOptionSnafu {
msg: format!("Invalid index type: {typ}, expected: 'BLOOM'"),
@@ -595,7 +595,7 @@ impl TryFrom<HashMap<String, String>> for SkippingIndexOptions {
.fail();
}
},
- None => SkipIndexType::default(),
+ None => SkippingIndexType::default(),
};
Ok(SkippingIndexOptions {
diff --git a/src/index/src/bloom_filter/creator.rs b/src/index/src/bloom_filter/creator.rs
index f8c54239645b..da95334782a7 100644
--- a/src/index/src/bloom_filter/creator.rs
+++ b/src/index/src/bloom_filter/creator.rs
@@ -73,7 +73,7 @@ impl BloomFilterCreator {
/// `rows_per_segment` <= 0
pub fn new(
rows_per_segment: usize,
- intermediate_provider: Box<dyn ExternalTempFileProvider>,
+ intermediate_provider: Arc<dyn ExternalTempFileProvider>,
global_memory_usage: Arc<AtomicUsize>,
global_memory_usage_threshold: Option<usize>,
) -> Self {
@@ -252,7 +252,7 @@ mod tests {
let mut writer = Cursor::new(Vec::new());
let mut creator = BloomFilterCreator::new(
2,
- Box::new(MockExternalTempFileProvider::new()),
+ Arc::new(MockExternalTempFileProvider::new()),
Arc::new(AtomicUsize::new(0)),
None,
);
@@ -322,7 +322,7 @@ mod tests {
let mut writer = Cursor::new(Vec::new());
let mut creator = BloomFilterCreator::new(
2,
- Box::new(MockExternalTempFileProvider::new()),
+ Arc::new(MockExternalTempFileProvider::new()),
Arc::new(AtomicUsize::new(0)),
None,
);
diff --git a/src/index/src/bloom_filter/creator/finalize_segment.rs b/src/index/src/bloom_filter/creator/finalize_segment.rs
index 091b1ee6aac0..e97652f5fc6a 100644
--- a/src/index/src/bloom_filter/creator/finalize_segment.rs
+++ b/src/index/src/bloom_filter/creator/finalize_segment.rs
@@ -43,7 +43,7 @@ pub struct FinalizedBloomFilterStorage {
intermediate_prefix: String,
/// The provider for intermediate Bloom filter files.
- intermediate_provider: Box<dyn ExternalTempFileProvider>,
+ intermediate_provider: Arc<dyn ExternalTempFileProvider>,
/// The memory usage of the in-memory Bloom filters.
memory_usage: usize,
@@ -59,7 +59,7 @@ pub struct FinalizedBloomFilterStorage {
impl FinalizedBloomFilterStorage {
/// Creates a new `FinalizedBloomFilterStorage`.
pub fn new(
- intermediate_provider: Box<dyn ExternalTempFileProvider>,
+ intermediate_provider: Arc<dyn ExternalTempFileProvider>,
global_memory_usage: Arc<AtomicUsize>,
global_memory_usage_threshold: Option<usize>,
) -> Self {
@@ -132,7 +132,7 @@ impl FinalizedBloomFilterStorage {
/// Drains the storage and returns a stream of finalized Bloom filter segments.
pub async fn drain(
&mut self,
- ) -> Result<Pin<Box<dyn Stream<Item = Result<FinalizedBloomFilterSegment>> + '_>>> {
+ ) -> Result<Pin<Box<dyn Stream<Item = Result<FinalizedBloomFilterSegment>> + Send + '_>>> {
// FAST PATH: memory only
if self.intermediate_file_id_counter == 0 {
return Ok(Box::pin(stream::iter(self.in_memory.drain(..).map(Ok))));
@@ -257,7 +257,7 @@ mod tests {
let global_memory_usage = Arc::new(AtomicUsize::new(0));
let global_memory_usage_threshold = Some(1024 * 1024); // 1MB
- let provider = Box::new(mock_provider);
+ let provider = Arc::new(mock_provider);
let mut storage = FinalizedBloomFilterStorage::new(
provider,
global_memory_usage.clone(),
diff --git a/src/index/src/bloom_filter/reader.rs b/src/index/src/bloom_filter/reader.rs
index 788afe033124..6dc592100fcf 100644
--- a/src/index/src/bloom_filter/reader.rs
+++ b/src/index/src/bloom_filter/reader.rs
@@ -190,7 +190,7 @@ mod tests {
let mut writer = Cursor::new(vec![]);
let mut creator = BloomFilterCreator::new(
2,
- Box::new(MockExternalTempFileProvider::new()),
+ Arc::new(MockExternalTempFileProvider::new()),
Arc::new(AtomicUsize::new(0)),
None,
);
diff --git a/src/mito2/src/compaction/compactor.rs b/src/mito2/src/compaction/compactor.rs
index e2499140fd61..e7d5e779b675 100644
--- a/src/mito2/src/compaction/compactor.rs
+++ b/src/mito2/src/compaction/compactor.rs
@@ -21,7 +21,6 @@ use common_telemetry::{info, warn};
use common_time::TimeToLive;
use object_store::manager::ObjectStoreManagerRef;
use serde::{Deserialize, Serialize};
-use smallvec::SmallVec;
use snafu::{OptionExt, ResultExt};
use store_api::metadata::RegionMetadataRef;
use store_api::storage::RegionId;
@@ -41,7 +40,7 @@ use crate::region::options::RegionOptions;
use crate::region::version::VersionRef;
use crate::region::{ManifestContext, RegionLeaderState, RegionRoleState};
use crate::schedule::scheduler::LocalScheduler;
-use crate::sst::file::{FileMeta, IndexType};
+use crate::sst::file::FileMeta;
use crate::sst::file_purger::LocalFilePurger;
use crate::sst::index::intermediate::IntermediateManager;
use crate::sst::index::puffin_manager::PuffinManagerFactory;
@@ -336,16 +335,7 @@ impl Compactor for DefaultCompactor {
time_range: sst_info.time_range,
level: output.output_level,
file_size: sst_info.file_size,
- available_indexes: {
- let mut indexes = SmallVec::new();
- if sst_info.index_metadata.inverted_index.is_available() {
- indexes.push(IndexType::InvertedIndex);
- }
- if sst_info.index_metadata.fulltext_index.is_available() {
- indexes.push(IndexType::FulltextIndex);
- }
- indexes
- },
+ available_indexes: sst_info.index_metadata.build_available_indexes(),
index_file_size: sst_info.index_metadata.file_size,
num_rows: sst_info.num_rows as u64,
num_row_groups: sst_info.num_row_groups,
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index 1baffd4a7fa1..3a6f368bacd8 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -816,8 +816,8 @@ pub enum Error {
location: Location,
},
- #[snafu(display("Failed to retrieve fulltext options from column metadata"))]
- FulltextOptions {
+ #[snafu(display("Failed to retrieve index options from column metadata"))]
+ IndexOptions {
#[snafu(implicit)]
location: Location,
source: datatypes::error::Error,
@@ -904,6 +904,20 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+
+ #[snafu(display("Failed to push value to bloom filter"))]
+ PushBloomFilterValue {
+ source: index::bloom_filter::error::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Failed to finish bloom filter"))]
+ BloomFilterFinish {
+ source: index::bloom_filter::error::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -1029,7 +1043,7 @@ impl ErrorExt for Error {
UnsupportedOperation { .. } => StatusCode::Unsupported,
RemoteCompaction { .. } => StatusCode::Unexpected,
- FulltextOptions { source, .. } => source.status_code(),
+ IndexOptions { source, .. } => source.status_code(),
CreateFulltextCreator { source, .. } => source.status_code(),
CastVector { source, .. } => source.status_code(),
FulltextPushText { source, .. }
@@ -1039,7 +1053,12 @@ impl ErrorExt for Error {
RegionBusy { .. } => StatusCode::RegionBusy,
GetSchemaMetadata { source, .. } => source.status_code(),
Timeout { .. } => StatusCode::Cancelled,
+
DecodeArrowRowGroup { .. } => StatusCode::Internal,
+
+ PushBloomFilterValue { source, .. } | BloomFilterFinish { source, .. } => {
+ source.status_code()
+ }
}
}
diff --git a/src/mito2/src/flush.rs b/src/mito2/src/flush.rs
index b522f225f9f0..64a739068ad9 100644
--- a/src/mito2/src/flush.rs
+++ b/src/mito2/src/flush.rs
@@ -19,7 +19,6 @@ use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use common_telemetry::{debug, error, info, trace};
-use smallvec::SmallVec;
use snafu::ResultExt;
use store_api::storage::RegionId;
use strum::IntoStaticStr;
@@ -45,7 +44,7 @@ use crate::request::{
SenderWriteRequest, WorkerRequest,
};
use crate::schedule::scheduler::{Job, SchedulerRef};
-use crate::sst::file::{FileId, FileMeta, IndexType};
+use crate::sst::file::{FileId, FileMeta};
use crate::sst::parquet::WriteOptions;
use crate::worker::WorkerListener;
@@ -378,16 +377,7 @@ impl RegionFlushTask {
time_range: sst_info.time_range,
level: 0,
file_size: sst_info.file_size,
- available_indexes: {
- let mut indexes = SmallVec::new();
- if sst_info.index_metadata.inverted_index.is_available() {
- indexes.push(IndexType::InvertedIndex);
- }
- if sst_info.index_metadata.fulltext_index.is_available() {
- indexes.push(IndexType::FulltextIndex);
- }
- indexes
- },
+ available_indexes: sst_info.index_metadata.build_available_indexes(),
index_file_size: sst_info.index_metadata.file_size,
num_rows: sst_info.num_rows as u64,
num_row_groups: sst_info.num_row_groups,
diff --git a/src/mito2/src/sst/file.rs b/src/mito2/src/sst/file.rs
index 5a9932ab433b..844d3e5d08f8 100644
--- a/src/mito2/src/sst/file.rs
+++ b/src/mito2/src/sst/file.rs
@@ -143,6 +143,8 @@ pub enum IndexType {
InvertedIndex,
/// Full-text index.
FulltextIndex,
+ /// Bloom filter.
+ BloomFilter,
}
impl FileMeta {
@@ -156,6 +158,11 @@ impl FileMeta {
self.available_indexes.contains(&IndexType::FulltextIndex)
}
+ /// Returns true if the file has a bloom filter
+ pub fn bloom_filter_available(&self) -> bool {
+ self.available_indexes.contains(&IndexType::BloomFilter)
+ }
+
/// Returns the size of the inverted index file
pub fn inverted_index_size(&self) -> Option<u64> {
if self.available_indexes.len() == 1 && self.inverted_index_available() {
diff --git a/src/mito2/src/sst/index.rs b/src/mito2/src/sst/index.rs
index 1972f3d7abb6..b6eac91e56d9 100644
--- a/src/mito2/src/sst/index.rs
+++ b/src/mito2/src/sst/index.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+pub(crate) mod bloom_filter;
+mod codec;
pub(crate) mod fulltext_index;
mod indexer;
pub(crate) mod intermediate;
@@ -22,8 +24,10 @@ pub(crate) mod store;
use std::num::NonZeroUsize;
+use bloom_filter::creator::BloomFilterIndexer;
use common_telemetry::{debug, warn};
use puffin_manager::SstPuffinManager;
+use smallvec::SmallVec;
use statistics::{ByteCount, RowCount};
use store_api::metadata::RegionMetadataRef;
use store_api::storage::{ColumnId, RegionId};
@@ -33,13 +37,14 @@ use crate::config::{FulltextIndexConfig, InvertedIndexConfig};
use crate::metrics::INDEX_CREATE_MEMORY_USAGE;
use crate::read::Batch;
use crate::region::options::IndexOptions;
-use crate::sst::file::FileId;
+use crate::sst::file::{FileId, IndexType};
use crate::sst::index::fulltext_index::creator::FulltextIndexer;
use crate::sst::index::intermediate::IntermediateManager;
use crate::sst::index::inverted_index::creator::InvertedIndexer;
pub(crate) const TYPE_INVERTED_INDEX: &str = "inverted_index";
pub(crate) const TYPE_FULLTEXT_INDEX: &str = "fulltext_index";
+pub(crate) const TYPE_BLOOM_FILTER: &str = "bloom_filter";
/// Output of the index creation.
#[derive(Debug, Clone, Default)]
@@ -50,6 +55,24 @@ pub struct IndexOutput {
pub inverted_index: InvertedIndexOutput,
/// Fulltext index output.
pub fulltext_index: FulltextIndexOutput,
+ /// Bloom filter output.
+ pub bloom_filter: BloomFilterOutput,
+}
+
+impl IndexOutput {
+ pub fn build_available_indexes(&self) -> SmallVec<[IndexType; 4]> {
+ let mut indexes = SmallVec::new();
+ if self.inverted_index.is_available() {
+ indexes.push(IndexType::InvertedIndex);
+ }
+ if self.fulltext_index.is_available() {
+ indexes.push(IndexType::FulltextIndex);
+ }
+ if self.bloom_filter.is_available() {
+ indexes.push(IndexType::BloomFilter);
+ }
+ indexes
+ }
}
/// Base output of the index creation.
@@ -73,6 +96,8 @@ impl IndexBaseOutput {
pub type InvertedIndexOutput = IndexBaseOutput;
/// Output of the fulltext index creation.
pub type FulltextIndexOutput = IndexBaseOutput;
+/// Output of the bloom filter creation.
+pub type BloomFilterOutput = IndexBaseOutput;
/// The index creator that hides the error handling details.
#[derive(Default)]
@@ -86,6 +111,8 @@ pub struct Indexer {
last_mem_inverted_index: usize,
fulltext_indexer: Option<FulltextIndexer>,
last_mem_fulltext_index: usize,
+ bloom_filter_indexer: Option<BloomFilterIndexer>,
+ last_mem_bloom_filter: usize,
}
impl Indexer {
@@ -129,6 +156,15 @@ impl Indexer {
.with_label_values(&[TYPE_FULLTEXT_INDEX])
.add(fulltext_mem as i64 - self.last_mem_fulltext_index as i64);
self.last_mem_fulltext_index = fulltext_mem;
+
+ let bloom_filter_mem = self
+ .bloom_filter_indexer
+ .as_ref()
+ .map_or(0, |creator| creator.memory_usage());
+ INDEX_CREATE_MEMORY_USAGE
+ .with_label_values(&[TYPE_BLOOM_FILTER])
+ .add(bloom_filter_mem as i64 - self.last_mem_bloom_filter as i64);
+ self.last_mem_bloom_filter = bloom_filter_mem;
}
}
@@ -158,7 +194,11 @@ impl<'a> IndexerBuilder<'a> {
indexer.inverted_indexer = self.build_inverted_indexer();
indexer.fulltext_indexer = self.build_fulltext_indexer().await;
- if indexer.inverted_indexer.is_none() && indexer.fulltext_indexer.is_none() {
+ indexer.bloom_filter_indexer = self.build_bloom_filter_indexer();
+ if indexer.inverted_indexer.is_none()
+ && indexer.fulltext_indexer.is_none()
+ && indexer.bloom_filter_indexer.is_none()
+ {
indexer.abort().await;
return Indexer::default();
}
@@ -266,7 +306,7 @@ impl<'a> IndexerBuilder<'a> {
if cfg!(any(test, feature = "test")) {
panic!(
- "Failed to create full-text indexer, region_id: {}, file_id: {}, err: {}",
+ "Failed to create full-text indexer, region_id: {}, file_id: {}, err: {:?}",
self.metadata.region_id, self.file_id, err
);
} else {
@@ -278,6 +318,53 @@ impl<'a> IndexerBuilder<'a> {
None
}
+
+ fn build_bloom_filter_indexer(&self) -> Option<BloomFilterIndexer> {
+ let create = true; // TODO(zhongzc): add config for bloom filter
+
+ if !create {
+ debug!(
+ "Skip creating bloom filter due to config, region_id: {}, file_id: {}",
+ self.metadata.region_id, self.file_id,
+ );
+ return None;
+ }
+
+ let mem_limit = Some(16 * 1024 * 1024); // TODO(zhongzc): add config for bloom filter
+ let indexer = BloomFilterIndexer::new(
+ self.file_id,
+ self.metadata,
+ self.intermediate_manager.clone(),
+ mem_limit,
+ );
+
+ let err = match indexer {
+ Ok(indexer) => {
+ if indexer.is_none() {
+ debug!(
+ "Skip creating bloom filter due to no columns require indexing, region_id: {}, file_id: {}",
+ self.metadata.region_id, self.file_id,
+ );
+ }
+ return indexer;
+ }
+ Err(err) => err,
+ };
+
+ if cfg!(any(test, feature = "test")) {
+ panic!(
+ "Failed to create bloom filter, region_id: {}, file_id: {}, err: {:?}",
+ self.metadata.region_id, self.file_id, err
+ );
+ } else {
+ warn!(
+ err; "Failed to create bloom filter, region_id: {}, file_id: {}",
+ self.metadata.region_id, self.file_id,
+ );
+ }
+
+ None
+ }
}
#[cfg(test)]
@@ -286,7 +373,9 @@ mod tests {
use api::v1::SemanticType;
use datatypes::data_type::ConcreteDataType;
- use datatypes::schema::{ColumnSchema, FulltextOptions};
+ use datatypes::schema::{
+ ColumnSchema, FulltextOptions, SkippingIndexOptions, SkippingIndexType,
+ };
use object_store::services::Memory;
use object_store::ObjectStore;
use puffin_manager::PuffinManagerFactory;
@@ -298,12 +387,14 @@ mod tests {
struct MetaConfig {
with_tag: bool,
with_fulltext: bool,
+ with_skipping_bloom: bool,
}
fn mock_region_metadata(
MetaConfig {
with_tag,
with_fulltext,
+ with_skipping_bloom,
}: MetaConfig,
) -> RegionMetadataRef {
let mut builder = RegionMetadataBuilder::new(RegionId::new(1, 2));
@@ -354,6 +445,24 @@ mod tests {
builder.push_column_metadata(column);
}
+ if with_skipping_bloom {
+ let column_schema =
+ ColumnSchema::new("bloom", ConcreteDataType::string_datatype(), false)
+ .with_skipping_options(SkippingIndexOptions {
+ granularity: 42,
+ index_type: SkippingIndexType::BloomFilter,
+ })
+ .unwrap();
+
+ let column = ColumnMetadata {
+ column_schema,
+ semantic_type: SemanticType::Field,
+ column_id: 5,
+ };
+
+ builder.push_column_metadata(column);
+ }
+
Arc::new(builder.build().unwrap())
}
@@ -374,6 +483,7 @@ mod tests {
let metadata = mock_region_metadata(MetaConfig {
with_tag: true,
with_fulltext: true,
+ with_skipping_bloom: true,
});
let indexer = IndexerBuilder {
op_type: OperationType::Flush,
@@ -392,6 +502,7 @@ mod tests {
assert!(indexer.inverted_indexer.is_some());
assert!(indexer.fulltext_indexer.is_some());
+ assert!(indexer.bloom_filter_indexer.is_some());
}
#[tokio::test]
@@ -403,6 +514,7 @@ mod tests {
let metadata = mock_region_metadata(MetaConfig {
with_tag: true,
with_fulltext: true,
+ with_skipping_bloom: true,
});
let indexer = IndexerBuilder {
op_type: OperationType::Flush,
@@ -456,6 +568,7 @@ mod tests {
let metadata = mock_region_metadata(MetaConfig {
with_tag: false,
with_fulltext: true,
+ with_skipping_bloom: true,
});
let indexer = IndexerBuilder {
op_type: OperationType::Flush,
@@ -474,10 +587,12 @@ mod tests {
assert!(indexer.inverted_indexer.is_none());
assert!(indexer.fulltext_indexer.is_some());
+ assert!(indexer.bloom_filter_indexer.is_some());
let metadata = mock_region_metadata(MetaConfig {
with_tag: true,
with_fulltext: false,
+ with_skipping_bloom: true,
});
let indexer = IndexerBuilder {
op_type: OperationType::Flush,
@@ -486,7 +601,7 @@ mod tests {
metadata: &metadata,
row_group_size: 1024,
puffin_manager: factory.build(mock_object_store()),
- intermediate_manager: intm_manager,
+ intermediate_manager: intm_manager.clone(),
index_options: IndexOptions::default(),
inverted_index_config: InvertedIndexConfig::default(),
fulltext_index_config: FulltextIndexConfig::default(),
@@ -496,6 +611,31 @@ mod tests {
assert!(indexer.inverted_indexer.is_some());
assert!(indexer.fulltext_indexer.is_none());
+ assert!(indexer.bloom_filter_indexer.is_some());
+
+ let metadata = mock_region_metadata(MetaConfig {
+ with_tag: true,
+ with_fulltext: true,
+ with_skipping_bloom: false,
+ });
+ let indexer = IndexerBuilder {
+ op_type: OperationType::Flush,
+ file_id: FileId::random(),
+ file_path: "test".to_string(),
+ metadata: &metadata,
+ row_group_size: 1024,
+ puffin_manager: factory.build(mock_object_store()),
+ intermediate_manager: intm_manager,
+ index_options: IndexOptions::default(),
+ inverted_index_config: InvertedIndexConfig::default(),
+ fulltext_index_config: FulltextIndexConfig::default(),
+ }
+ .build()
+ .await;
+
+ assert!(indexer.inverted_indexer.is_some());
+ assert!(indexer.fulltext_indexer.is_some());
+ assert!(indexer.bloom_filter_indexer.is_none());
}
#[tokio::test]
@@ -507,6 +647,7 @@ mod tests {
let metadata = mock_region_metadata(MetaConfig {
with_tag: true,
with_fulltext: true,
+ with_skipping_bloom: true,
});
let indexer = IndexerBuilder {
op_type: OperationType::Flush,
diff --git a/src/mito2/src/sst/index/bloom_filter.rs b/src/mito2/src/sst/index/bloom_filter.rs
new file mode 100644
index 000000000000..347195a3b16b
--- /dev/null
+++ b/src/mito2/src/sst/index/bloom_filter.rs
@@ -0,0 +1,17 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub(crate) mod creator;
+
+const INDEX_BLOB_TYPE: &str = "greptime-bloom-filter-v1";
diff --git a/src/mito2/src/sst/index/bloom_filter/creator.rs b/src/mito2/src/sst/index/bloom_filter/creator.rs
new file mode 100644
index 000000000000..8c56800f47e7
--- /dev/null
+++ b/src/mito2/src/sst/index/bloom_filter/creator.rs
@@ -0,0 +1,530 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashMap;
+use std::sync::atomic::AtomicUsize;
+use std::sync::Arc;
+
+use common_telemetry::warn;
+use datatypes::schema::SkippingIndexType;
+use index::bloom_filter::creator::BloomFilterCreator;
+use puffin::puffin_manager::{PuffinWriter, PutOptions};
+use snafu::{ensure, ResultExt};
+use store_api::metadata::RegionMetadataRef;
+use store_api::storage::ColumnId;
+use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
+
+use crate::error::{
+ BiErrorsSnafu, BloomFilterFinishSnafu, IndexOptionsSnafu, OperateAbortedIndexSnafu,
+ PuffinAddBlobSnafu, PushBloomFilterValueSnafu, Result,
+};
+use crate::read::Batch;
+use crate::row_converter::SortField;
+use crate::sst::file::FileId;
+use crate::sst::index::bloom_filter::INDEX_BLOB_TYPE;
+use crate::sst::index::codec::{IndexValueCodec, IndexValuesCodec};
+use crate::sst::index::intermediate::{
+ IntermediateLocation, IntermediateManager, TempFileProvider,
+};
+use crate::sst::index::puffin_manager::SstPuffinWriter;
+use crate::sst::index::statistics::{ByteCount, RowCount, Statistics};
+use crate::sst::index::TYPE_BLOOM_FILTER;
+
+/// The buffer size for the pipe used to send index data to the puffin blob.
+const PIPE_BUFFER_SIZE_FOR_SENDING_BLOB: usize = 8192;
+
+/// The indexer for the bloom filter index.
+pub struct BloomFilterIndexer {
+ /// The bloom filter creators.
+ creators: HashMap<ColumnId, BloomFilterCreator>,
+
+ /// The provider for intermediate files.
+ temp_file_provider: Arc<TempFileProvider>,
+
+ /// Codec for decoding primary keys.
+ codec: IndexValuesCodec,
+
+ /// Whether the indexing process has been aborted.
+ aborted: bool,
+
+ /// The statistics of the indexer.
+ stats: Statistics,
+
+ /// The global memory usage.
+ global_memory_usage: Arc<AtomicUsize>,
+}
+
+impl BloomFilterIndexer {
+ /// Creates a new bloom filter indexer.
+ pub fn new(
+ sst_file_id: FileId,
+ metadata: &RegionMetadataRef,
+ intermediate_manager: IntermediateManager,
+ memory_usage_threshold: Option<usize>,
+ ) -> Result<Option<Self>> {
+ let mut creators = HashMap::new();
+
+ let temp_file_provider = Arc::new(TempFileProvider::new(
+ IntermediateLocation::new(&metadata.region_id, &sst_file_id),
+ intermediate_manager,
+ ));
+ let global_memory_usage = Arc::new(AtomicUsize::new(0));
+
+ for column in &metadata.column_metadatas {
+ let options =
+ column
+ .column_schema
+ .skipping_index_options()
+ .context(IndexOptionsSnafu {
+ column_name: &column.column_schema.name,
+ })?;
+
+ let options = match options {
+ Some(options) if options.index_type == SkippingIndexType::BloomFilter => options,
+ _ => continue,
+ };
+
+ let creator = BloomFilterCreator::new(
+ options.granularity as _,
+ temp_file_provider.clone(),
+ global_memory_usage.clone(),
+ memory_usage_threshold,
+ );
+ creators.insert(column.column_id, creator);
+ }
+
+ if creators.is_empty() {
+ return Ok(None);
+ }
+
+ let codec = IndexValuesCodec::from_tag_columns(metadata.primary_key_columns());
+ let indexer = Self {
+ creators,
+ temp_file_provider,
+ codec,
+ aborted: false,
+ stats: Statistics::new(TYPE_BLOOM_FILTER),
+ global_memory_usage,
+ };
+ Ok(Some(indexer))
+ }
+
+ /// Updates index with a batch of rows.
+ /// Garbage will be cleaned up if failed to update.
+ ///
+ /// TODO(zhongzc): duplicate with `mito2::sst::index::inverted_index::creator::InvertedIndexCreator`
+ pub async fn update(&mut self, batch: &Batch) -> Result<()> {
+ ensure!(!self.aborted, OperateAbortedIndexSnafu);
+
+ if self.creators.is_empty() {
+ return Ok(());
+ }
+
+ if let Err(update_err) = self.do_update(batch).await {
+ // clean up garbage if failed to update
+ if let Err(err) = self.do_cleanup().await {
+ if cfg!(any(test, feature = "test")) {
+ panic!("Failed to clean up index creator, err: {err:?}",);
+ } else {
+ warn!(err; "Failed to clean up index creator");
+ }
+ }
+ return Err(update_err);
+ }
+
+ Ok(())
+ }
+
+ /// Finishes index creation and cleans up garbage.
+ /// Returns the number of rows and bytes written.
+ ///
+ /// TODO(zhongzc): duplicate with `mito2::sst::index::inverted_index::creator::InvertedIndexCreator`
+ pub async fn finish(
+ &mut self,
+ puffin_writer: &mut SstPuffinWriter,
+ ) -> Result<(RowCount, ByteCount)> {
+ ensure!(!self.aborted, OperateAbortedIndexSnafu);
+
+ if self.stats.row_count() == 0 {
+ // no IO is performed, no garbage to clean up, just return
+ return Ok((0, 0));
+ }
+
+ let finish_res = self.do_finish(puffin_writer).await;
+ // clean up garbage no matter finish successfully or not
+ if let Err(err) = self.do_cleanup().await {
+ if cfg!(any(test, feature = "test")) {
+ panic!("Failed to clean up index creator, err: {err:?}",);
+ } else {
+ warn!(err; "Failed to clean up index creator");
+ }
+ }
+
+ finish_res.map(|_| (self.stats.row_count(), self.stats.byte_count()))
+ }
+
+ /// Aborts index creation and clean up garbage.
+ ///
+ /// TODO(zhongzc): duplicate with `mito2::sst::index::inverted_index::creator::InvertedIndexCreator`
+ pub async fn abort(&mut self) -> Result<()> {
+ if self.aborted {
+ return Ok(());
+ }
+ self.aborted = true;
+
+ self.do_cleanup().await
+ }
+
+ async fn do_update(&mut self, batch: &Batch) -> Result<()> {
+ let mut guard = self.stats.record_update();
+
+ let n = batch.num_rows();
+ guard.inc_row_count(n);
+
+ // Tags
+ for ((col_id, _), field, value) in self.codec.decode(batch.primary_key())? {
+ let Some(creator) = self.creators.get_mut(col_id) else {
+ continue;
+ };
+ let elems = value
+ .map(|v| {
+ let mut buf = vec![];
+ IndexValueCodec::encode_nonnull_value(v.as_value_ref(), field, &mut buf)?;
+ Ok(buf)
+ })
+ .transpose()?;
+ creator
+ .push_n_row_elems(n, elems)
+ .await
+ .context(PushBloomFilterValueSnafu)?;
+ }
+
+ // Fields
+ for field in batch.fields() {
+ let Some(creator) = self.creators.get_mut(&field.column_id) else {
+ continue;
+ };
+
+ let sort_field = SortField::new(field.data.data_type());
+ for i in 0..n {
+ let value = field.data.get_ref(i);
+ let elems = (!value.is_null())
+ .then(|| {
+ let mut buf = vec![];
+ IndexValueCodec::encode_nonnull_value(value, &sort_field, &mut buf)?;
+ Ok(buf)
+ })
+ .transpose()?;
+
+ creator
+ .push_row_elems(elems)
+ .await
+ .context(PushBloomFilterValueSnafu)?;
+ }
+ }
+ Ok(())
+ }
+
+ /// TODO(zhongzc): duplicate with `mito2::sst::index::inverted_index::creator::InvertedIndexCreator`
+ async fn do_finish(&mut self, puffin_writer: &mut SstPuffinWriter) -> Result<()> {
+ let mut guard = self.stats.record_finish();
+
+ for (id, creator) in &mut self.creators {
+ let written_bytes = Self::do_finish_single_creator(id, creator, puffin_writer).await?;
+ guard.inc_byte_count(written_bytes);
+ }
+
+ Ok(())
+ }
+
+ async fn do_cleanup(&mut self) -> Result<()> {
+ let mut _guard = self.stats.record_cleanup();
+
+ self.creators.clear();
+ self.temp_file_provider.cleanup().await
+ }
+
+ /// Data flow of finishing index:
+ ///
+ /// ```text
+ /// (In Memory Buffer)
+ /// ┌──────┐
+ /// ┌─────────────┐ │ PIPE │
+ /// │ │ write index data │ │
+ /// │ IndexWriter ├──────────────────►│ tx │
+ /// │ │ │ │
+ /// └─────────────┘ │ │
+ /// ┌─────────────────┤ rx │
+ /// ┌─────────────┐ │ read as blob └──────┘
+ /// │ │ │
+ /// │ PuffinWriter├─┤
+ /// │ │ │ copy to file ┌──────┐
+ /// └─────────────┘ └────────────────►│ File │
+ /// └──────┘
+ /// ```
+ ///
+ /// TODO(zhongzc): duplicate with `mito2::sst::index::inverted_index::creator::InvertedIndexCreator`
+ async fn do_finish_single_creator(
+ col_id: &ColumnId,
+ creator: &mut BloomFilterCreator,
+ puffin_writer: &mut SstPuffinWriter,
+ ) -> Result<ByteCount> {
+ let (tx, rx) = tokio::io::duplex(PIPE_BUFFER_SIZE_FOR_SENDING_BLOB);
+
+ let blob_name = format!("{}-{}", INDEX_BLOB_TYPE, col_id);
+ let (index_finish, puffin_add_blob) = futures::join!(
+ creator.finish(tx.compat_write()),
+ puffin_writer.put_blob(&blob_name, rx.compat(), PutOptions::default())
+ );
+
+ match (
+ puffin_add_blob.context(PuffinAddBlobSnafu),
+ index_finish.context(BloomFilterFinishSnafu),
+ ) {
+ (Err(e1), Err(e2)) => BiErrorsSnafu {
+ first: Box::new(e1),
+ second: Box::new(e2),
+ }
+ .fail()?,
+
+ (Ok(_), e @ Err(_)) => e?,
+ (e @ Err(_), Ok(_)) => e.map(|_| ())?,
+ (Ok(written_bytes), Ok(_)) => {
+ return Ok(written_bytes);
+ }
+ }
+
+ Ok(0)
+ }
+
+ /// Returns the memory usage of the indexer.
+ pub fn memory_usage(&self) -> usize {
+ self.global_memory_usage
+ .load(std::sync::atomic::Ordering::Relaxed)
+ }
+
+ /// Returns the column ids to be indexed.
+ pub fn column_ids(&self) -> impl Iterator<Item = ColumnId> + use<'_> {
+ self.creators.keys().copied()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::iter;
+
+ use api::v1::SemanticType;
+ use datatypes::data_type::ConcreteDataType;
+ use datatypes::schema::{ColumnSchema, SkippingIndexOptions};
+ use datatypes::value::ValueRef;
+ use datatypes::vectors::{UInt64Vector, UInt8Vector};
+ use index::bloom_filter::reader::{BloomFilterReader, BloomFilterReaderImpl};
+ use object_store::services::Memory;
+ use object_store::ObjectStore;
+ use puffin::puffin_manager::{BlobGuard, PuffinManager, PuffinReader};
+ use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder};
+ use store_api::storage::RegionId;
+
+ use super::*;
+ use crate::read::BatchColumn;
+ use crate::row_converter::{McmpRowCodec, RowCodec, SortField};
+ use crate::sst::index::puffin_manager::PuffinManagerFactory;
+
+ fn mock_object_store() -> ObjectStore {
+ ObjectStore::new(Memory::default()).unwrap().finish()
+ }
+
+ async fn new_intm_mgr(path: impl AsRef<str>) -> IntermediateManager {
+ IntermediateManager::init_fs(path).await.unwrap()
+ }
+
+ /// tag_str:
+ /// - type: string
+ /// - index: bloom filter
+ /// - granularity: 2
+ /// - column_id: 1
+ ///
+ /// ts:
+ /// - type: timestamp
+ /// - index: time index
+ /// - column_id: 2
+ ///
+ /// field_u64:
+ /// - type: uint64
+ /// - index: bloom filter
+ /// - granularity: 4
+ /// - column_id: 3
+ fn mock_region_metadata() -> RegionMetadataRef {
+ let mut builder = RegionMetadataBuilder::new(RegionId::new(1, 2));
+ builder
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "tag_str",
+ ConcreteDataType::string_datatype(),
+ false,
+ )
+ .with_skipping_options(SkippingIndexOptions {
+ index_type: SkippingIndexType::BloomFilter,
+ granularity: 2,
+ })
+ .unwrap(),
+ semantic_type: SemanticType::Tag,
+ column_id: 1,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "ts",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ ),
+ semantic_type: SemanticType::Timestamp,
+ column_id: 2,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "field_u64",
+ ConcreteDataType::uint64_datatype(),
+ false,
+ )
+ .with_skipping_options(SkippingIndexOptions {
+ index_type: SkippingIndexType::BloomFilter,
+ granularity: 4,
+ })
+ .unwrap(),
+ semantic_type: SemanticType::Field,
+ column_id: 3,
+ })
+ .primary_key(vec![1]);
+
+ Arc::new(builder.build().unwrap())
+ }
+
+ fn new_batch(str_tag: impl AsRef<str>, u64_field: impl IntoIterator<Item = u64>) -> Batch {
+ let fields = vec![SortField::new(ConcreteDataType::string_datatype())];
+ let codec = McmpRowCodec::new(fields);
+ let row: [ValueRef; 1] = [str_tag.as_ref().into()];
+ let primary_key = codec.encode(row.into_iter()).unwrap();
+
+ let u64_field = BatchColumn {
+ column_id: 3,
+ data: Arc::new(UInt64Vector::from_iter_values(u64_field)),
+ };
+ let num_rows = u64_field.data.len();
+
+ Batch::new(
+ primary_key,
+ Arc::new(UInt64Vector::from_iter_values(
+ iter::repeat(0).take(num_rows),
+ )),
+ Arc::new(UInt64Vector::from_iter_values(
+ iter::repeat(0).take(num_rows),
+ )),
+ Arc::new(UInt8Vector::from_iter_values(
+ iter::repeat(1).take(num_rows),
+ )),
+ vec![u64_field],
+ )
+ .unwrap()
+ }
+
+ #[tokio::test]
+ async fn test_bloom_filter_indexer() {
+ let prefix = "test_bloom_filter_indexer_";
+ let object_store = mock_object_store();
+ let intm_mgr = new_intm_mgr(prefix).await;
+ let region_metadata = mock_region_metadata();
+ let memory_usage_threshold = Some(1024);
+
+ let mut indexer = BloomFilterIndexer::new(
+ FileId::random(),
+ ®ion_metadata,
+ intm_mgr,
+ memory_usage_threshold,
+ )
+ .unwrap()
+ .unwrap();
+
+ // push 20 rows
+ let batch = new_batch("tag1", 0..10);
+ indexer.update(&batch).await.unwrap();
+
+ let batch = new_batch("tag2", 10..20);
+ indexer.update(&batch).await.unwrap();
+
+ let (_d, factory) = PuffinManagerFactory::new_for_test_async(prefix).await;
+ let puffin_manager = factory.build(object_store);
+
+ let index_file_name = "index_file";
+ let mut puffin_writer = puffin_manager.writer(index_file_name).await.unwrap();
+ let (row_count, byte_count) = indexer.finish(&mut puffin_writer).await.unwrap();
+ assert_eq!(row_count, 20);
+ assert!(byte_count > 0);
+ puffin_writer.finish().await.unwrap();
+
+ let puffin_reader = puffin_manager.reader(index_file_name).await.unwrap();
+
+ // tag_str
+ {
+ let blob_guard = puffin_reader
+ .blob("greptime-bloom-filter-v1-1")
+ .await
+ .unwrap();
+ let reader = blob_guard.reader().await.unwrap();
+ let mut bloom_filter = BloomFilterReaderImpl::new(reader);
+ let metadata = bloom_filter.metadata().await.unwrap();
+
+ assert_eq!(metadata.bloom_filter_segments.len(), 10);
+ for i in 0..5 {
+ let bf = bloom_filter
+ .bloom_filter(&metadata.bloom_filter_segments[i])
+ .await
+ .unwrap();
+ assert!(bf.contains(b"tag1"));
+ }
+ for i in 5..10 {
+ let bf = bloom_filter
+ .bloom_filter(&metadata.bloom_filter_segments[i])
+ .await
+ .unwrap();
+ assert!(bf.contains(b"tag2"));
+ }
+ }
+
+ // field_u64
+ {
+ let sort_field = SortField::new(ConcreteDataType::uint64_datatype());
+
+ let blob_guard = puffin_reader
+ .blob("greptime-bloom-filter-v1-3")
+ .await
+ .unwrap();
+ let reader = blob_guard.reader().await.unwrap();
+ let mut bloom_filter = BloomFilterReaderImpl::new(reader);
+ let metadata = bloom_filter.metadata().await.unwrap();
+
+ assert_eq!(metadata.bloom_filter_segments.len(), 5);
+ for i in 0u64..20 {
+ let bf = bloom_filter
+ .bloom_filter(&metadata.bloom_filter_segments[i as usize / 4])
+ .await
+ .unwrap();
+ let mut buf = vec![];
+ IndexValueCodec::encode_nonnull_value(ValueRef::UInt64(i), &sort_field, &mut buf)
+ .unwrap();
+
+ assert!(bf.contains(&buf));
+ }
+ }
+ }
+}
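
The data-flow comment in do_finish_single_creator above describes streaming the serialized bloom filter through an in-memory pipe while the puffin writer reads the other end as a blob. A minimal sketch of that pattern with tokio::io::duplex and futures::join!, assuming only the tokio and futures crates and leaving out the puffin and index types:

use tokio::io::{AsyncReadExt, AsyncWriteExt};

#[tokio::main]
async fn main() {
    // Bounded in-memory pipe, like PIPE_BUFFER_SIZE_FOR_SENDING_BLOB above.
    let (mut tx, mut rx) = tokio::io::duplex(8192);

    let write_side = async move {
        // Stands in for `creator.finish(tx.compat_write())`.
        tx.write_all(b"bloom filter bytes").await.unwrap();
        // `tx` is dropped here, which signals EOF to the reader.
    };
    let read_side = async move {
        // Stands in for `puffin_writer.put_blob(.., rx.compat(), ..)`.
        let mut blob = Vec::new();
        rx.read_to_end(&mut blob).await.unwrap();
        blob
    };

    // Drive both ends concurrently, as the real code does with futures::join!.
    let ((), blob) = futures::join!(write_side, read_side);
    assert_eq!(blob, b"bloom filter bytes".to_vec());
}

Keeping the bytes in a bounded pipe avoids buffering the whole filter twice while still letting the index writer and the blob reader make progress concurrently.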
diff --git a/src/mito2/src/sst/index/inverted_index/codec.rs b/src/mito2/src/sst/index/codec.rs
similarity index 100%
rename from src/mito2/src/sst/index/inverted_index/codec.rs
rename to src/mito2/src/sst/index/codec.rs
diff --git a/src/mito2/src/sst/index/fulltext_index/creator.rs b/src/mito2/src/sst/index/fulltext_index/creator.rs
index 416e39d9dd5e..41fa15bd7c72 100644
--- a/src/mito2/src/sst/index/fulltext_index/creator.rs
+++ b/src/mito2/src/sst/index/fulltext_index/creator.rs
@@ -27,8 +27,7 @@ use store_api::storage::{ColumnId, ConcreteDataType, RegionId};
use crate::error::{
CastVectorSnafu, CreateFulltextCreatorSnafu, FieldTypeMismatchSnafu, FulltextFinishSnafu,
- FulltextOptionsSnafu, FulltextPushTextSnafu, OperateAbortedIndexSnafu, PuffinAddBlobSnafu,
- Result,
+ FulltextPushTextSnafu, IndexOptionsSnafu, OperateAbortedIndexSnafu, PuffinAddBlobSnafu, Result,
};
use crate::read::Batch;
use crate::sst::file::FileId;
@@ -61,13 +60,12 @@ impl FulltextIndexer {
let mut creators = HashMap::new();
for column in &metadata.column_metadatas {
- let options =
- column
- .column_schema
- .fulltext_options()
- .context(FulltextOptionsSnafu {
- column_name: &column.column_schema.name,
- })?;
+ let options = column
+ .column_schema
+ .fulltext_options()
+ .context(IndexOptionsSnafu {
+ column_name: &column.column_schema.name,
+ })?;
// Relax the type constraint here as many types can be casted to string.
diff --git a/src/mito2/src/sst/index/indexer/abort.rs b/src/mito2/src/sst/index/indexer/abort.rs
index 68034d48fb29..5b29009a033b 100644
--- a/src/mito2/src/sst/index/indexer/abort.rs
+++ b/src/mito2/src/sst/index/indexer/abort.rs
@@ -20,6 +20,7 @@ impl Indexer {
pub(crate) async fn do_abort(&mut self) {
self.do_abort_inverted_index().await;
self.do_abort_fulltext_index().await;
+ self.do_abort_bloom_filter().await;
self.puffin_manager = None;
}
@@ -33,7 +34,7 @@ impl Indexer {
if cfg!(any(test, feature = "test")) {
panic!(
- "Failed to abort inverted index, region_id: {}, file_id: {}, err: {}",
+ "Failed to abort inverted index, region_id: {}, file_id: {}, err: {:?}",
self.region_id, self.file_id, err
);
} else {
@@ -54,7 +55,7 @@ impl Indexer {
if cfg!(any(test, feature = "test")) {
panic!(
- "Failed to abort full-text index, region_id: {}, file_id: {}, err: {}",
+ "Failed to abort full-text index, region_id: {}, file_id: {}, err: {:?}",
self.region_id, self.file_id, err
);
} else {
@@ -64,4 +65,25 @@ impl Indexer {
);
}
}
+
+ async fn do_abort_bloom_filter(&mut self) {
+ let Some(mut indexer) = self.bloom_filter_indexer.take() else {
+ return;
+ };
+ let Err(err) = indexer.abort().await else {
+ return;
+ };
+
+ if cfg!(any(test, feature = "test")) {
+ panic!(
+ "Failed to abort bloom filter, region_id: {}, file_id: {}, err: {:?}",
+ self.region_id, self.file_id, err
+ );
+ } else {
+ warn!(
+ err; "Failed to abort bloom filter, region_id: {}, file_id: {}",
+ self.region_id, self.file_id,
+ );
+ }
+ }
}
diff --git a/src/mito2/src/sst/index/indexer/finish.rs b/src/mito2/src/sst/index/indexer/finish.rs
index a0157a9b66f4..025eead758ff 100644
--- a/src/mito2/src/sst/index/indexer/finish.rs
+++ b/src/mito2/src/sst/index/indexer/finish.rs
@@ -15,11 +15,14 @@
use common_telemetry::{debug, warn};
use puffin::puffin_manager::{PuffinManager, PuffinWriter};
+use crate::sst::index::bloom_filter::creator::BloomFilterIndexer;
use crate::sst::index::fulltext_index::creator::FulltextIndexer;
use crate::sst::index::inverted_index::creator::InvertedIndexer;
use crate::sst::index::puffin_manager::SstPuffinWriter;
use crate::sst::index::statistics::{ByteCount, RowCount};
-use crate::sst::index::{FulltextIndexOutput, IndexOutput, Indexer, InvertedIndexOutput};
+use crate::sst::index::{
+ BloomFilterOutput, FulltextIndexOutput, IndexOutput, Indexer, InvertedIndexOutput,
+};
impl Indexer {
pub(crate) async fn do_finish(&mut self) -> IndexOutput {
@@ -46,6 +49,12 @@ impl Indexer {
return IndexOutput::default();
}
+ let success = self.do_finish_bloom_filter(&mut writer, &mut output).await;
+ if !success {
+ self.do_abort().await;
+ return IndexOutput::default();
+ }
+
output.file_size = self.do_finish_puffin_writer(writer).await;
output
}
@@ -60,7 +69,7 @@ impl Indexer {
if cfg!(any(test, feature = "test")) {
panic!(
- "Failed to create puffin writer, region_id: {}, file_id: {}, err: {}",
+ "Failed to create puffin writer, region_id: {}, file_id: {}, err: {:?}",
self.region_id, self.file_id, err
);
} else {
@@ -81,7 +90,7 @@ impl Indexer {
if cfg!(any(test, feature = "test")) {
panic!(
- "Failed to finish puffin writer, region_id: {}, file_id: {}, err: {}",
+ "Failed to finish puffin writer, region_id: {}, file_id: {}, err: {:?}",
self.region_id, self.file_id, err
);
} else {
@@ -119,7 +128,7 @@ impl Indexer {
if cfg!(any(test, feature = "test")) {
panic!(
- "Failed to finish inverted index, region_id: {}, file_id: {}, err: {}",
+ "Failed to finish inverted index, region_id: {}, file_id: {}, err: {:?}",
self.region_id, self.file_id, err
);
} else {
@@ -156,7 +165,7 @@ impl Indexer {
if cfg!(any(test, feature = "test")) {
panic!(
- "Failed to finish full-text index, region_id: {}, file_id: {}, err: {}",
+ "Failed to finish full-text index, region_id: {}, file_id: {}, err: {:?}",
self.region_id, self.file_id, err
);
} else {
@@ -169,6 +178,43 @@ impl Indexer {
false
}
+ async fn do_finish_bloom_filter(
+ &mut self,
+ puffin_writer: &mut SstPuffinWriter,
+ index_output: &mut IndexOutput,
+ ) -> bool {
+ let Some(mut indexer) = self.bloom_filter_indexer.take() else {
+ return true;
+ };
+
+ let err = match indexer.finish(puffin_writer).await {
+ Ok((row_count, byte_count)) => {
+ self.fill_bloom_filter_output(
+ &mut index_output.bloom_filter,
+ row_count,
+ byte_count,
+ &indexer,
+ );
+ return true;
+ }
+ Err(err) => err,
+ };
+
+ if cfg!(any(test, feature = "test")) {
+ panic!(
+ "Failed to finish bloom filter, region_id: {}, file_id: {}, err: {:?}",
+ self.region_id, self.file_id, err
+ );
+ } else {
+ warn!(
+ err; "Failed to finish bloom filter, region_id: {}, file_id: {}",
+ self.region_id, self.file_id,
+ );
+ }
+
+ false
+ }
+
fn fill_inverted_index_output(
&mut self,
output: &mut InvertedIndexOutput,
@@ -202,4 +248,21 @@ impl Indexer {
output.row_count = row_count;
output.columns = indexer.column_ids().collect();
}
+
+ fn fill_bloom_filter_output(
+ &mut self,
+ output: &mut BloomFilterOutput,
+ row_count: RowCount,
+ byte_count: ByteCount,
+ indexer: &BloomFilterIndexer,
+ ) {
+ debug!(
+ "Bloom filter created, region_id: {}, file_id: {}, written_bytes: {}, written_rows: {}",
+ self.region_id, self.file_id, byte_count, row_count
+ );
+
+ output.index_size = byte_count;
+ output.row_count = row_count;
+ output.columns = indexer.column_ids().collect();
+ }
}
diff --git a/src/mito2/src/sst/index/indexer/update.rs b/src/mito2/src/sst/index/indexer/update.rs
index c08f171bb415..c2ab33f0e13a 100644
--- a/src/mito2/src/sst/index/indexer/update.rs
+++ b/src/mito2/src/sst/index/indexer/update.rs
@@ -29,6 +29,9 @@ impl Indexer {
if !self.do_update_fulltext_index(batch).await {
self.do_abort().await;
}
+ if !self.do_update_bloom_filter(batch).await {
+ self.do_abort().await;
+ }
}
/// Returns false if the update failed.
@@ -43,7 +46,7 @@ impl Indexer {
if cfg!(any(test, feature = "test")) {
panic!(
- "Failed to update inverted index, region_id: {}, file_id: {}, err: {}",
+ "Failed to update inverted index, region_id: {}, file_id: {}, err: {:?}",
self.region_id, self.file_id, err
);
} else {
@@ -68,7 +71,7 @@ impl Indexer {
if cfg!(any(test, feature = "test")) {
panic!(
- "Failed to update full-text index, region_id: {}, file_id: {}, err: {}",
+ "Failed to update full-text index, region_id: {}, file_id: {}, err: {:?}",
self.region_id, self.file_id, err
);
} else {
@@ -80,4 +83,29 @@ impl Indexer {
false
}
+
+ /// Returns false if the update failed.
+ async fn do_update_bloom_filter(&mut self, batch: &Batch) -> bool {
+ let Some(creator) = self.bloom_filter_indexer.as_mut() else {
+ return true;
+ };
+
+ let Err(err) = creator.update(batch).await else {
+ return true;
+ };
+
+ if cfg!(any(test, feature = "test")) {
+ panic!(
+ "Failed to update bloom filter, region_id: {}, file_id: {}, err: {:?}",
+ self.region_id, self.file_id, err
+ );
+ } else {
+ warn!(
+ err; "Failed to update bloom filter, region_id: {}, file_id: {}",
+ self.region_id, self.file_id,
+ );
+ }
+
+ false
+ }
}
diff --git a/src/mito2/src/sst/index/intermediate.rs b/src/mito2/src/sst/index/intermediate.rs
index d0da804c745b..fd8845f96ac3 100644
--- a/src/mito2/src/sst/index/intermediate.rs
+++ b/src/mito2/src/sst/index/intermediate.rs
@@ -14,13 +14,25 @@
use std::path::PathBuf;
+use async_trait::async_trait;
+use common_error::ext::BoxedError;
use common_telemetry::warn;
+use futures::{AsyncRead, AsyncWrite};
+use index::error as index_error;
+use index::error::Result as IndexResult;
+use index::external_provider::ExternalTempFileProvider;
use object_store::util::{self, normalize_dir};
+use snafu::ResultExt;
use store_api::storage::{ColumnId, RegionId};
use uuid::Uuid;
use crate::access_layer::new_fs_cache_store;
use crate::error::Result;
+use crate::metrics::{
+ INDEX_INTERMEDIATE_FLUSH_OP_TOTAL, INDEX_INTERMEDIATE_READ_BYTES_TOTAL,
+ INDEX_INTERMEDIATE_READ_OP_TOTAL, INDEX_INTERMEDIATE_SEEK_OP_TOTAL,
+ INDEX_INTERMEDIATE_WRITE_BYTES_TOTAL, INDEX_INTERMEDIATE_WRITE_OP_TOTAL,
+};
use crate::sst::file::FileId;
use crate::sst::index::store::InstrumentedStore;
@@ -129,14 +141,105 @@ impl IntermediateLocation {
}
}
+/// `TempFileProvider` implements `ExternalTempFileProvider`.
+/// It uses `InstrumentedStore` to create and read intermediate files.
+pub(crate) struct TempFileProvider {
+ /// Provides the location of intermediate files.
+ location: IntermediateLocation,
+    /// Provides the store used to access intermediate files.
+ manager: IntermediateManager,
+}
+
+#[async_trait]
+impl ExternalTempFileProvider for TempFileProvider {
+ async fn create(
+ &self,
+ file_group: &str,
+ file_id: &str,
+ ) -> IndexResult<Box<dyn AsyncWrite + Unpin + Send>> {
+ let path = self.location.file_path(file_group, file_id);
+ let writer = self
+ .manager
+ .store()
+ .writer(
+ &path,
+ &INDEX_INTERMEDIATE_WRITE_BYTES_TOTAL,
+ &INDEX_INTERMEDIATE_WRITE_OP_TOTAL,
+ &INDEX_INTERMEDIATE_FLUSH_OP_TOTAL,
+ )
+ .await
+ .map_err(BoxedError::new)
+ .context(index_error::ExternalSnafu)?;
+ Ok(Box::new(writer))
+ }
+
+ async fn read_all(
+ &self,
+ file_group: &str,
+ ) -> IndexResult<Vec<(String, Box<dyn AsyncRead + Unpin + Send>)>> {
+ let file_group_path = self.location.file_group_path(file_group);
+ let entries = self
+ .manager
+ .store()
+ .list(&file_group_path)
+ .await
+ .map_err(BoxedError::new)
+ .context(index_error::ExternalSnafu)?;
+ let mut readers = Vec::with_capacity(entries.len());
+
+ for entry in entries {
+ if entry.metadata().is_dir() {
+ warn!("Unexpected entry in index creation dir: {:?}", entry.path());
+ continue;
+ }
+
+ let im_file_id = self.location.im_file_id_from_path(entry.path());
+
+ let reader = self
+ .manager
+ .store()
+ .reader(
+ entry.path(),
+ &INDEX_INTERMEDIATE_READ_BYTES_TOTAL,
+ &INDEX_INTERMEDIATE_READ_OP_TOTAL,
+ &INDEX_INTERMEDIATE_SEEK_OP_TOTAL,
+ )
+ .await
+ .map_err(BoxedError::new)
+ .context(index_error::ExternalSnafu)?;
+ readers.push((im_file_id, Box::new(reader) as _));
+ }
+
+ Ok(readers)
+ }
+}
+
+impl TempFileProvider {
+ /// Creates a new `TempFileProvider`.
+ pub fn new(location: IntermediateLocation, manager: IntermediateManager) -> Self {
+ Self { location, manager }
+ }
+
+ /// Removes all intermediate files.
+ pub async fn cleanup(&self) -> Result<()> {
+ self.manager
+ .store()
+ .remove_all(self.location.dir_to_cleanup())
+ .await
+ }
+}
+
#[cfg(test)]
mod tests {
use std::ffi::OsStr;
use common_test_util::temp_dir;
+ use futures::{AsyncReadExt, AsyncWriteExt};
use regex::Regex;
+ use store_api::storage::RegionId;
use super::*;
+ use crate::sst::file::FileId;
#[tokio::test]
async fn test_manager() {
@@ -212,4 +315,58 @@ mod tests {
.is_match(&pi.next().unwrap().to_string_lossy())); // fulltext path
assert!(pi.next().is_none());
}
+
+ #[tokio::test]
+ async fn test_temp_file_provider_basic() {
+ let temp_dir = temp_dir::create_temp_dir("intermediate");
+ let path = temp_dir.path().display().to_string();
+
+ let location = IntermediateLocation::new(&RegionId::new(0, 0), &FileId::random());
+ let store = IntermediateManager::init_fs(path).await.unwrap();
+ let provider = TempFileProvider::new(location.clone(), store);
+
+ let file_group = "tag0";
+ let file_id = "0000000010";
+ let mut writer = provider.create(file_group, file_id).await.unwrap();
+ writer.write_all(b"hello").await.unwrap();
+ writer.flush().await.unwrap();
+ writer.close().await.unwrap();
+
+ let file_id = "0000000100";
+ let mut writer = provider.create(file_group, file_id).await.unwrap();
+ writer.write_all(b"world").await.unwrap();
+ writer.flush().await.unwrap();
+ writer.close().await.unwrap();
+
+ let file_group = "tag1";
+ let file_id = "0000000010";
+ let mut writer = provider.create(file_group, file_id).await.unwrap();
+ writer.write_all(b"foo").await.unwrap();
+ writer.flush().await.unwrap();
+ writer.close().await.unwrap();
+
+ let readers = provider.read_all("tag0").await.unwrap();
+ assert_eq!(readers.len(), 2);
+ for (_, mut reader) in readers {
+ let mut buf = Vec::new();
+ reader.read_to_end(&mut buf).await.unwrap();
+ assert!(matches!(buf.as_slice(), b"hello" | b"world"));
+ }
+ let readers = provider.read_all("tag1").await.unwrap();
+ assert_eq!(readers.len(), 1);
+ let mut reader = readers.into_iter().map(|x| x.1).next().unwrap();
+ let mut buf = Vec::new();
+ reader.read_to_end(&mut buf).await.unwrap();
+ assert_eq!(buf, b"foo");
+
+ provider.cleanup().await.unwrap();
+
+ assert!(provider
+ .manager
+ .store()
+ .list(location.dir_to_cleanup())
+ .await
+ .unwrap()
+ .is_empty());
+ }
}
diff --git a/src/mito2/src/sst/index/inverted_index.rs b/src/mito2/src/sst/index/inverted_index.rs
index d325f735a431..73dca4ac47f2 100644
--- a/src/mito2/src/sst/index/inverted_index.rs
+++ b/src/mito2/src/sst/index/inverted_index.rs
@@ -13,7 +13,6 @@
// limitations under the License.
pub(crate) mod applier;
-mod codec;
pub(crate) mod creator;
const INDEX_BLOB_TYPE: &str = "greptime-inverted-index-v1";
diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder.rs b/src/mito2/src/sst/index/inverted_index/applier/builder.rs
index c2f90b293003..e14bb89bd1c9 100644
--- a/src/mito2/src/sst/index/inverted_index/applier/builder.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier/builder.rs
@@ -37,8 +37,8 @@ use crate::cache::file_cache::FileCacheRef;
use crate::cache::index::inverted_index::InvertedIndexCacheRef;
use crate::error::{BuildIndexApplierSnafu, ColumnNotFoundSnafu, ConvertValueSnafu, Result};
use crate::row_converter::SortField;
+use crate::sst::index::codec::IndexValueCodec;
use crate::sst::index::inverted_index::applier::InvertedIndexApplier;
-use crate::sst::index::inverted_index::codec::IndexValueCodec;
use crate::sst::index::puffin_manager::PuffinManagerFactory;
/// Constructs an [`InvertedIndexApplier`] which applies predicates to SST files during scan.
diff --git a/src/mito2/src/sst/index/inverted_index/creator.rs b/src/mito2/src/sst/index/inverted_index/creator.rs
index 0076322fccbd..138035d554a1 100644
--- a/src/mito2/src/sst/index/inverted_index/creator.rs
+++ b/src/mito2/src/sst/index/inverted_index/creator.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-pub(crate) mod temp_provider;
-
use std::collections::HashSet;
use std::num::NonZeroUsize;
use std::sync::atomic::AtomicUsize;
@@ -38,9 +36,10 @@ use crate::error::{
use crate::read::Batch;
use crate::row_converter::SortField;
use crate::sst::file::FileId;
-use crate::sst::index::intermediate::{IntermediateLocation, IntermediateManager};
-use crate::sst::index::inverted_index::codec::{IndexValueCodec, IndexValuesCodec};
-use crate::sst::index::inverted_index::creator::temp_provider::TempFileProvider;
+use crate::sst::index::codec::{IndexValueCodec, IndexValuesCodec};
+use crate::sst::index::intermediate::{
+ IntermediateLocation, IntermediateManager, TempFileProvider,
+};
use crate::sst::index::inverted_index::INDEX_BLOB_TYPE;
use crate::sst::index::puffin_manager::SstPuffinWriter;
use crate::sst::index::statistics::{ByteCount, RowCount, Statistics};
diff --git a/src/mito2/src/sst/index/inverted_index/creator/temp_provider.rs b/src/mito2/src/sst/index/inverted_index/creator/temp_provider.rs
deleted file mode 100644
index 1822f3119459..000000000000
--- a/src/mito2/src/sst/index/inverted_index/creator/temp_provider.rs
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use async_trait::async_trait;
-use common_error::ext::BoxedError;
-use common_telemetry::warn;
-use futures::{AsyncRead, AsyncWrite};
-use index::error as index_error;
-use index::error::Result as IndexResult;
-use index::external_provider::ExternalTempFileProvider;
-use snafu::ResultExt;
-
-use crate::error::Result;
-use crate::metrics::{
- INDEX_INTERMEDIATE_FLUSH_OP_TOTAL, INDEX_INTERMEDIATE_READ_BYTES_TOTAL,
- INDEX_INTERMEDIATE_READ_OP_TOTAL, INDEX_INTERMEDIATE_SEEK_OP_TOTAL,
- INDEX_INTERMEDIATE_WRITE_BYTES_TOTAL, INDEX_INTERMEDIATE_WRITE_OP_TOTAL,
-};
-use crate::sst::index::intermediate::{IntermediateLocation, IntermediateManager};
-
-/// `TempFileProvider` implements `ExternalTempFileProvider`.
-/// It uses `InstrumentedStore` to create and read intermediate files.
-pub(crate) struct TempFileProvider {
- /// Provides the location of intermediate files.
- location: IntermediateLocation,
- /// Provides store to access to intermediate files.
- manager: IntermediateManager,
-}
-
-#[async_trait]
-impl ExternalTempFileProvider for TempFileProvider {
- async fn create(
- &self,
- file_group: &str,
- file_id: &str,
- ) -> IndexResult<Box<dyn AsyncWrite + Unpin + Send>> {
- let path = self.location.file_path(file_group, file_id);
- let writer = self
- .manager
- .store()
- .writer(
- &path,
- &INDEX_INTERMEDIATE_WRITE_BYTES_TOTAL,
- &INDEX_INTERMEDIATE_WRITE_OP_TOTAL,
- &INDEX_INTERMEDIATE_FLUSH_OP_TOTAL,
- )
- .await
- .map_err(BoxedError::new)
- .context(index_error::ExternalSnafu)?;
- Ok(Box::new(writer))
- }
-
- async fn read_all(
- &self,
- file_group: &str,
- ) -> IndexResult<Vec<(String, Box<dyn AsyncRead + Unpin + Send>)>> {
- let file_group_path = self.location.file_group_path(file_group);
- let entries = self
- .manager
- .store()
- .list(&file_group_path)
- .await
- .map_err(BoxedError::new)
- .context(index_error::ExternalSnafu)?;
- let mut readers = Vec::with_capacity(entries.len());
-
- for entry in entries {
- if entry.metadata().is_dir() {
- warn!("Unexpected entry in index creation dir: {:?}", entry.path());
- continue;
- }
-
- let im_file_id = self.location.im_file_id_from_path(entry.path());
-
- let reader = self
- .manager
- .store()
- .reader(
- entry.path(),
- &INDEX_INTERMEDIATE_READ_BYTES_TOTAL,
- &INDEX_INTERMEDIATE_READ_OP_TOTAL,
- &INDEX_INTERMEDIATE_SEEK_OP_TOTAL,
- )
- .await
- .map_err(BoxedError::new)
- .context(index_error::ExternalSnafu)?;
- readers.push((im_file_id, Box::new(reader) as _));
- }
-
- Ok(readers)
- }
-}
-
-impl TempFileProvider {
- /// Creates a new `TempFileProvider`.
- pub fn new(location: IntermediateLocation, manager: IntermediateManager) -> Self {
- Self { location, manager }
- }
-
- /// Removes all intermediate files.
- pub async fn cleanup(&self) -> Result<()> {
- self.manager
- .store()
- .remove_all(self.location.dir_to_cleanup())
- .await
- }
-}
-
-#[cfg(test)]
-mod tests {
- use common_test_util::temp_dir;
- use futures::{AsyncReadExt, AsyncWriteExt};
- use store_api::storage::RegionId;
-
- use super::*;
- use crate::sst::file::FileId;
-
- #[tokio::test]
- async fn test_temp_file_provider_basic() {
- let temp_dir = temp_dir::create_temp_dir("intermediate");
- let path = temp_dir.path().display().to_string();
-
- let location = IntermediateLocation::new(&RegionId::new(0, 0), &FileId::random());
- let store = IntermediateManager::init_fs(path).await.unwrap();
- let provider = TempFileProvider::new(location.clone(), store);
-
- let file_group = "tag0";
- let file_id = "0000000010";
- let mut writer = provider.create(file_group, file_id).await.unwrap();
- writer.write_all(b"hello").await.unwrap();
- writer.flush().await.unwrap();
- writer.close().await.unwrap();
-
- let file_id = "0000000100";
- let mut writer = provider.create(file_group, file_id).await.unwrap();
- writer.write_all(b"world").await.unwrap();
- writer.flush().await.unwrap();
- writer.close().await.unwrap();
-
- let file_group = "tag1";
- let file_id = "0000000010";
- let mut writer = provider.create(file_group, file_id).await.unwrap();
- writer.write_all(b"foo").await.unwrap();
- writer.flush().await.unwrap();
- writer.close().await.unwrap();
-
- let readers = provider.read_all("tag0").await.unwrap();
- assert_eq!(readers.len(), 2);
- for (_, mut reader) in readers {
- let mut buf = Vec::new();
- reader.read_to_end(&mut buf).await.unwrap();
- assert!(matches!(buf.as_slice(), b"hello" | b"world"));
- }
- let readers = provider.read_all("tag1").await.unwrap();
- assert_eq!(readers.len(), 1);
- let mut reader = readers.into_iter().map(|x| x.1).next().unwrap();
- let mut buf = Vec::new();
- reader.read_to_end(&mut buf).await.unwrap();
- assert_eq!(buf, b"foo");
-
- provider.cleanup().await.unwrap();
-
- assert!(provider
- .manager
- .store()
- .list(location.dir_to_cleanup())
- .await
- .unwrap()
- .is_empty());
- }
-}
|
feat
|
integrate indexer with mito2 (#5236)
|
8b730678151ccece500a99cbbad97aad4e0cae51
|
2024-02-19 12:17:14
|
dennis zhuang
|
feat: impl partitions and region_peers information schema (#3278)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 6a660df9c79f..dfd285122acb 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6324,6 +6324,7 @@ dependencies = [
"datafusion-common",
"datafusion-expr",
"datatypes",
+ "itertools 0.10.5",
"lazy_static",
"meta-client",
"moka",
diff --git a/src/catalog/src/error.rs b/src/catalog/src/error.rs
index c9fd60e9ee4e..11cb3df96b71 100644
--- a/src/catalog/src/error.rs
+++ b/src/catalog/src/error.rs
@@ -164,6 +164,15 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Failed to find table partitions: #{table}"))]
+ FindPartitions {
+ source: partition::error::Error,
+ table: String,
+ },
+
+ #[snafu(display("Failed to find region routes"))]
+ FindRegionRoutes { source: partition::error::Error },
+
#[snafu(display("Failed to read system catalog table records"))]
ReadSystemCatalog {
location: Location,
@@ -254,11 +263,14 @@ impl ErrorExt for Error {
match self {
Error::InvalidKey { .. }
| Error::SchemaNotFound { .. }
- | Error::TableNotFound { .. }
| Error::CatalogNotFound { .. }
+ | Error::FindPartitions { .. }
+ | Error::FindRegionRoutes { .. }
| Error::InvalidEntryType { .. }
| Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
+ Error::TableNotFound { .. } => StatusCode::TableNotFound,
+
Error::SystemCatalog { .. }
| Error::EmptyValue { .. }
| Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
diff --git a/src/catalog/src/information_schema.rs b/src/catalog/src/information_schema.rs
index 7abc4797816a..d2d83e4d9d8a 100644
--- a/src/catalog/src/information_schema.rs
+++ b/src/catalog/src/information_schema.rs
@@ -15,7 +15,9 @@
mod columns;
mod key_column_usage;
mod memory_table;
+mod partitions;
mod predicate;
+mod region_peers;
mod runtime_metrics;
mod schemata;
mod table_names;
@@ -47,6 +49,8 @@ use self::columns::InformationSchemaColumns;
use crate::error::Result;
use crate::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
+use crate::information_schema::partitions::InformationSchemaPartitions;
+use crate::information_schema::region_peers::InformationSchemaRegionPeers;
use crate::information_schema::runtime_metrics::InformationSchemaMetrics;
use crate::information_schema::schemata::InformationSchemaSchemata;
use crate::information_schema::tables::InformationSchemaTables;
@@ -74,6 +78,7 @@ lazy_static! {
TRIGGERS,
GLOBAL_STATUS,
SESSION_STATUS,
+ PARTITIONS,
];
}
@@ -156,6 +161,10 @@ impl InformationSchemaProvider {
BUILD_INFO.to_string(),
self.build_table(BUILD_INFO).unwrap(),
);
+ tables.insert(
+ REGION_PEERS.to_string(),
+ self.build_table(REGION_PEERS).unwrap(),
+ );
}
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
@@ -226,6 +235,14 @@ impl InformationSchemaProvider {
self.catalog_manager.clone(),
)) as _),
RUNTIME_METRICS => Some(Arc::new(InformationSchemaMetrics::new())),
+ PARTITIONS => Some(Arc::new(InformationSchemaPartitions::new(
+ self.catalog_name.clone(),
+ self.catalog_manager.clone(),
+ )) as _),
+ REGION_PEERS => Some(Arc::new(InformationSchemaRegionPeers::new(
+ self.catalog_name.clone(),
+ self.catalog_manager.clone(),
+ )) as _),
_ => None,
}
}
diff --git a/src/catalog/src/information_schema/columns.rs b/src/catalog/src/information_schema/columns.rs
index dbae86538346..f96cba8f901e 100644
--- a/src/catalog/src/information_schema/columns.rs
+++ b/src/catalog/src/information_schema/columns.rs
@@ -58,6 +58,7 @@ const COLUMN_DEFAULT: &str = "column_default";
const IS_NULLABLE: &str = "is_nullable";
const COLUMN_TYPE: &str = "column_type";
const COLUMN_COMMENT: &str = "column_comment";
+const INIT_CAPACITY: usize = 42;
impl InformationSchemaColumns {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
@@ -154,16 +155,16 @@ impl InformationSchemaColumnsBuilder {
schema,
catalog_name,
catalog_manager,
- catalog_names: StringVectorBuilder::with_capacity(42),
- schema_names: StringVectorBuilder::with_capacity(42),
- table_names: StringVectorBuilder::with_capacity(42),
- column_names: StringVectorBuilder::with_capacity(42),
- data_types: StringVectorBuilder::with_capacity(42),
- semantic_types: StringVectorBuilder::with_capacity(42),
- column_defaults: StringVectorBuilder::with_capacity(42),
- is_nullables: StringVectorBuilder::with_capacity(42),
- column_types: StringVectorBuilder::with_capacity(42),
- column_comments: StringVectorBuilder::with_capacity(42),
+ catalog_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ column_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ data_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ semantic_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ column_defaults: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ is_nullables: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ column_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ column_comments: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}
@@ -177,13 +178,6 @@ impl InformationSchemaColumnsBuilder {
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
- if !catalog_manager
- .schema_exists(&catalog_name, &schema_name)
- .await?
- {
- continue;
- }
-
let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
while let Some(table) = stream.try_next().await? {
diff --git a/src/catalog/src/information_schema/key_column_usage.rs b/src/catalog/src/information_schema/key_column_usage.rs
index 28fba3c63ced..f12167ddc4c8 100644
--- a/src/catalog/src/information_schema/key_column_usage.rs
+++ b/src/catalog/src/information_schema/key_column_usage.rs
@@ -23,10 +23,10 @@ use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
-use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
+use datatypes::prelude::{ConcreteDataType, MutableVector, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
-use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder};
+use datatypes::vectors::{ConstantVector, StringVector, StringVectorBuilder, UInt32VectorBuilder};
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
@@ -44,6 +44,7 @@ const TABLE_SCHEMA: &str = "table_schema";
const TABLE_NAME: &str = "table_name";
const COLUMN_NAME: &str = "column_name";
const ORDINAL_POSITION: &str = "ordinal_position";
+const INIT_CAPACITY: usize = 42;
/// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
pub(super) struct InformationSchemaKeyColumnUsage {
@@ -162,9 +163,6 @@ struct InformationSchemaKeyColumnUsageBuilder {
column_name: StringVectorBuilder,
ordinal_position: UInt32VectorBuilder,
position_in_unique_constraint: UInt32VectorBuilder,
- referenced_table_schema: StringVectorBuilder,
- referenced_table_name: StringVectorBuilder,
- referenced_column_name: StringVectorBuilder,
}
impl InformationSchemaKeyColumnUsageBuilder {
@@ -177,18 +175,15 @@ impl InformationSchemaKeyColumnUsageBuilder {
schema,
catalog_name,
catalog_manager,
- constraint_catalog: StringVectorBuilder::with_capacity(42),
- constraint_schema: StringVectorBuilder::with_capacity(42),
- constraint_name: StringVectorBuilder::with_capacity(42),
- table_catalog: StringVectorBuilder::with_capacity(42),
- table_schema: StringVectorBuilder::with_capacity(42),
- table_name: StringVectorBuilder::with_capacity(42),
- column_name: StringVectorBuilder::with_capacity(42),
- ordinal_position: UInt32VectorBuilder::with_capacity(42),
- position_in_unique_constraint: UInt32VectorBuilder::with_capacity(42),
- referenced_table_schema: StringVectorBuilder::with_capacity(42),
- referenced_table_name: StringVectorBuilder::with_capacity(42),
- referenced_column_name: StringVectorBuilder::with_capacity(42),
+ constraint_catalog: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ constraint_schema: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ constraint_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ table_catalog: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ table_schema: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ table_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ column_name: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ ordinal_position: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
+ position_in_unique_constraint: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
}
}
@@ -301,12 +296,15 @@ impl InformationSchemaKeyColumnUsageBuilder {
self.column_name.push(Some(column_name));
self.ordinal_position.push(Some(ordinal_position));
self.position_in_unique_constraint.push(None);
- self.referenced_table_schema.push(None);
- self.referenced_table_name.push(None);
- self.referenced_column_name.push(None);
}
fn finish(&mut self) -> Result<RecordBatch> {
+ let rows_num = self.table_catalog.len();
+
+ let null_string_vector = Arc::new(ConstantVector::new(
+ Arc::new(StringVector::from(vec![None as Option<&str>])),
+ rows_num,
+ ));
let columns: Vec<VectorRef> = vec![
Arc::new(self.constraint_catalog.finish()),
Arc::new(self.constraint_schema.finish()),
@@ -317,9 +315,9 @@ impl InformationSchemaKeyColumnUsageBuilder {
Arc::new(self.column_name.finish()),
Arc::new(self.ordinal_position.finish()),
Arc::new(self.position_in_unique_constraint.finish()),
- Arc::new(self.referenced_table_schema.finish()),
- Arc::new(self.referenced_table_name.finish()),
- Arc::new(self.referenced_column_name.finish()),
+ null_string_vector.clone(),
+ null_string_vector.clone(),
+ null_string_vector,
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
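
For illustration, a minimal query sketch against `information_schema.key_column_usage` as it behaves after the change above: the referenced_* columns stay in the output schema but are now backed by a constant NULL vector, so they are expected to be NULL for every row. The schema name in the filter is purely illustrative.

-- Sketch only; 'public' is an assumed schema name, not taken from this commit.
SELECT constraint_name, table_name, column_name,
       referenced_table_schema, referenced_table_name, referenced_column_name
FROM information_schema.key_column_usage
WHERE table_schema = 'public';
-- referenced_table_schema, referenced_table_name and referenced_column_name
-- come from the shared constant NULL string vector built in finish().
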
diff --git a/src/catalog/src/information_schema/partitions.rs b/src/catalog/src/information_schema/partitions.rs
new file mode 100644
index 000000000000..ecf23f8cc9ce
--- /dev/null
+++ b/src/catalog/src/information_schema/partitions.rs
@@ -0,0 +1,399 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::{Arc, Weak};
+
+use arrow_schema::SchemaRef as ArrowSchemaRef;
+use common_catalog::consts::INFORMATION_SCHEMA_PARTITIONS_TABLE_ID;
+use common_error::ext::BoxedError;
+use common_query::physical_plan::TaskContext;
+use common_recordbatch::adapter::RecordBatchStreamAdapter;
+use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use common_time::datetime::DateTime;
+use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
+use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
+use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
+use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
+use datatypes::value::Value;
+use datatypes::vectors::{
+ ConstantVector, DateTimeVector, DateTimeVectorBuilder, Int64Vector, Int64VectorBuilder,
+ MutableVector, StringVector, StringVectorBuilder, UInt64VectorBuilder,
+};
+use futures::TryStreamExt;
+use partition::manager::PartitionInfo;
+use partition::partition::PartitionDef;
+use snafu::{OptionExt, ResultExt};
+use store_api::storage::{RegionId, ScanRequest, TableId};
+use table::metadata::{TableInfo, TableType};
+
+use super::PARTITIONS;
+use crate::error::{
+ CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, Result,
+ UpgradeWeakCatalogManagerRefSnafu,
+};
+use crate::information_schema::{InformationTable, Predicates};
+use crate::kvbackend::KvBackendCatalogManager;
+use crate::CatalogManager;
+
+const TABLE_CATALOG: &str = "table_catalog";
+const TABLE_SCHEMA: &str = "table_schema";
+const TABLE_NAME: &str = "table_name";
+const PARTITION_NAME: &str = "partition_name";
+const PARTITION_EXPRESSION: &str = "partition_expression";
+/// The region id
+const GREPTIME_PARTITION_ID: &str = "greptime_partition_id";
+const INIT_CAPACITY: usize = 42;
+
+/// The `PARTITIONS` table provides information about partitioned tables.
+/// See https://dev.mysql.com/doc/refman/8.0/en/information-schema-partitions-table.html
+/// We provide an extra column `greptime_partition_id` for the GreptimeDB region id.
+pub(super) struct InformationSchemaPartitions {
+ schema: SchemaRef,
+ catalog_name: String,
+ catalog_manager: Weak<dyn CatalogManager>,
+}
+
+impl InformationSchemaPartitions {
+ pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
+ Self {
+ schema: Self::schema(),
+ catalog_name,
+ catalog_manager,
+ }
+ }
+
+ pub(crate) fn schema() -> SchemaRef {
+ Arc::new(Schema::new(vec![
+ ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
+ ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
+ ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
+ ColumnSchema::new(PARTITION_NAME, ConcreteDataType::string_datatype(), false),
+ ColumnSchema::new(
+ "subpartition_name",
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ ColumnSchema::new(
+ "partition_ordinal_position",
+ ConcreteDataType::int64_datatype(),
+ true,
+ ),
+ ColumnSchema::new(
+ "subpartition_ordinal_position",
+ ConcreteDataType::int64_datatype(),
+ true,
+ ),
+ ColumnSchema::new(
+ "partition_method",
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ ColumnSchema::new(
+ "subpartition_method",
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ ColumnSchema::new(
+ PARTITION_EXPRESSION,
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ ColumnSchema::new(
+ "subpartition_expression",
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ ColumnSchema::new(
+ "partition_description",
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ ColumnSchema::new("table_rows", ConcreteDataType::int64_datatype(), true),
+ ColumnSchema::new("avg_row_length", ConcreteDataType::int64_datatype(), true),
+ ColumnSchema::new("data_length", ConcreteDataType::int64_datatype(), true),
+ ColumnSchema::new("max_data_length", ConcreteDataType::int64_datatype(), true),
+ ColumnSchema::new("index_length", ConcreteDataType::int64_datatype(), true),
+ ColumnSchema::new("data_free", ConcreteDataType::int64_datatype(), true),
+ ColumnSchema::new("create_time", ConcreteDataType::datetime_datatype(), true),
+ ColumnSchema::new("update_time", ConcreteDataType::datetime_datatype(), true),
+ ColumnSchema::new("check_time", ConcreteDataType::datetime_datatype(), true),
+ ColumnSchema::new("checksum", ConcreteDataType::int64_datatype(), true),
+ ColumnSchema::new(
+ "partition_comment",
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ ColumnSchema::new("nodegroup", ConcreteDataType::string_datatype(), true),
+ ColumnSchema::new("tablespace_name", ConcreteDataType::string_datatype(), true),
+ ColumnSchema::new(
+ GREPTIME_PARTITION_ID,
+ ConcreteDataType::uint64_datatype(),
+ true,
+ ),
+ ]))
+ }
+
+ fn builder(&self) -> InformationSchemaPartitionsBuilder {
+ InformationSchemaPartitionsBuilder::new(
+ self.schema.clone(),
+ self.catalog_name.clone(),
+ self.catalog_manager.clone(),
+ )
+ }
+}
+
+impl InformationTable for InformationSchemaPartitions {
+ fn table_id(&self) -> TableId {
+ INFORMATION_SCHEMA_PARTITIONS_TABLE_ID
+ }
+
+ fn table_name(&self) -> &'static str {
+ PARTITIONS
+ }
+
+ fn schema(&self) -> SchemaRef {
+ self.schema.clone()
+ }
+
+ fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
+ let schema = self.schema.arrow_schema().clone();
+ let mut builder = self.builder();
+ let stream = Box::pin(DfRecordBatchStreamAdapter::new(
+ schema,
+ futures::stream::once(async move {
+ builder
+ .make_partitions(Some(request))
+ .await
+ .map(|x| x.into_df_record_batch())
+ .map_err(Into::into)
+ }),
+ ));
+ Ok(Box::pin(
+ RecordBatchStreamAdapter::try_new(stream)
+ .map_err(BoxedError::new)
+ .context(InternalSnafu)?,
+ ))
+ }
+}
+
+struct InformationSchemaPartitionsBuilder {
+ schema: SchemaRef,
+ catalog_name: String,
+ catalog_manager: Weak<dyn CatalogManager>,
+
+ catalog_names: StringVectorBuilder,
+ schema_names: StringVectorBuilder,
+ table_names: StringVectorBuilder,
+ partition_names: StringVectorBuilder,
+ partition_ordinal_positions: Int64VectorBuilder,
+ partition_expressions: StringVectorBuilder,
+ create_times: DateTimeVectorBuilder,
+ partition_ids: UInt64VectorBuilder,
+}
+
+impl InformationSchemaPartitionsBuilder {
+ fn new(
+ schema: SchemaRef,
+ catalog_name: String,
+ catalog_manager: Weak<dyn CatalogManager>,
+ ) -> Self {
+ Self {
+ schema,
+ catalog_name,
+ catalog_manager,
+ catalog_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ partition_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ partition_ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+ partition_expressions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ create_times: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
+ partition_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
+ }
+ }
+
+ /// Construct the `information_schema.partitions` virtual table
+ async fn make_partitions(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
+ let catalog_name = self.catalog_name.clone();
+ let catalog_manager = self
+ .catalog_manager
+ .upgrade()
+ .context(UpgradeWeakCatalogManagerRefSnafu)?;
+
+ let partition_manager = catalog_manager
+ .as_any()
+ .downcast_ref::<KvBackendCatalogManager>()
+ .map(|catalog_manager| catalog_manager.partition_manager());
+
+ let predicates = Predicates::from_scan_request(&request);
+
+ for schema_name in catalog_manager.schema_names(&catalog_name).await? {
+ let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
+
+ while let Some(table) = stream.try_next().await? {
+ let table_info = table.table_info();
+
+ if table_info.table_type == TableType::Temporary {
+ continue;
+ }
+
+ let table_id = table_info.ident.table_id;
+ let partitions = if let Some(partition_manager) = &partition_manager {
+ partition_manager
+ .find_table_partitions(table_id)
+ .await
+ .context(FindPartitionsSnafu {
+ table: &table_info.name,
+ })?
+ } else {
+ // Current node must be a standalone instance, contains only one partition by default.
+ // TODO(dennis): change it when we support multi-regions for standalone.
+ vec![PartitionInfo {
+ id: RegionId::new(table_id, 0),
+ partition: PartitionDef::new(vec![], vec![]),
+ }]
+ };
+
+ self.add_partitions(
+ &predicates,
+ &table_info,
+ &catalog_name,
+ &schema_name,
+ &table_info.name,
+ &partitions,
+ );
+ }
+ }
+
+ self.finish()
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ fn add_partitions(
+ &mut self,
+ predicates: &Predicates,
+ table_info: &TableInfo,
+ catalog_name: &str,
+ schema_name: &str,
+ table_name: &str,
+ partitions: &[PartitionInfo],
+ ) {
+ let row = [
+ (TABLE_CATALOG, &Value::from(catalog_name)),
+ (TABLE_SCHEMA, &Value::from(schema_name)),
+ (TABLE_NAME, &Value::from(table_name)),
+ ];
+
+ if !predicates.eval(&row) {
+ return;
+ }
+
+ for (index, partition) in partitions.iter().enumerate() {
+ let partition_name = format!("p{index}");
+
+ self.catalog_names.push(Some(catalog_name));
+ self.schema_names.push(Some(schema_name));
+ self.table_names.push(Some(table_name));
+ self.partition_names.push(Some(&partition_name));
+ self.partition_ordinal_positions
+ .push(Some((index + 1) as i64));
+ let expressions = if partition.partition.partition_columns().is_empty() {
+ None
+ } else {
+ Some(partition.partition.to_string())
+ };
+
+ self.partition_expressions.push(expressions.as_deref());
+ self.create_times.push(Some(DateTime::from(
+ table_info.meta.created_on.timestamp_millis(),
+ )));
+ self.partition_ids.push(Some(partition.id.as_u64()));
+ }
+ }
+
+ fn finish(&mut self) -> Result<RecordBatch> {
+ let rows_num = self.catalog_names.len();
+
+ let null_string_vector = Arc::new(ConstantVector::new(
+ Arc::new(StringVector::from(vec![None as Option<&str>])),
+ rows_num,
+ ));
+ let null_i64_vector = Arc::new(ConstantVector::new(
+ Arc::new(Int64Vector::from(vec![None])),
+ rows_num,
+ ));
+ let null_datetime_vector = Arc::new(ConstantVector::new(
+ Arc::new(DateTimeVector::from(vec![None])),
+ rows_num,
+ ));
+ let partition_methods = Arc::new(ConstantVector::new(
+ Arc::new(StringVector::from(vec![Some("RANGE")])),
+ rows_num,
+ ));
+
+ let columns: Vec<VectorRef> = vec![
+ Arc::new(self.catalog_names.finish()),
+ Arc::new(self.schema_names.finish()),
+ Arc::new(self.table_names.finish()),
+ Arc::new(self.partition_names.finish()),
+ null_string_vector.clone(),
+ Arc::new(self.partition_ordinal_positions.finish()),
+ null_i64_vector.clone(),
+ partition_methods,
+ null_string_vector.clone(),
+ Arc::new(self.partition_expressions.finish()),
+ null_string_vector.clone(),
+ null_string_vector.clone(),
+ // TODO(dennis): rows and index statistics info
+ null_i64_vector.clone(),
+ null_i64_vector.clone(),
+ null_i64_vector.clone(),
+ null_i64_vector.clone(),
+ null_i64_vector.clone(),
+ null_i64_vector.clone(),
+ Arc::new(self.create_times.finish()),
+ // TODO(dennis): supports update_time
+ null_datetime_vector.clone(),
+ null_datetime_vector,
+ null_i64_vector,
+ null_string_vector.clone(),
+ null_string_vector.clone(),
+ null_string_vector,
+ Arc::new(self.partition_ids.finish()),
+ ];
+ RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
+ }
+}
+
+impl DfPartitionStream for InformationSchemaPartitions {
+ fn schema(&self) -> &ArrowSchemaRef {
+ self.schema.arrow_schema()
+ }
+
+ fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
+ let schema = self.schema.arrow_schema().clone();
+ let mut builder = self.builder();
+ Box::pin(DfRecordBatchStreamAdapter::new(
+ schema,
+ futures::stream::once(async move {
+ builder
+ .make_partitions(None)
+ .await
+ .map(|x| x.into_df_record_batch())
+ .map_err(Into::into)
+ }),
+ ))
+ }
+}
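
As a hedged usage sketch of the new PARTITIONS table: the table and column names below come from the schema defined in this file, while the filtered table name is an assumption for illustration. Each row maps one region of a table to a `greptime_partition_id`.

-- Sketch only; 'monitor' is an assumed user table name.
SELECT table_catalog, table_schema, table_name, partition_name,
       partition_method, partition_expression, greptime_partition_id
FROM information_schema.partitions
WHERE table_name = 'monitor';
-- partition_method is the constant "RANGE" vector, and partition_expression is
-- NULL when the table has no partition columns, as built in finish() above.
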
diff --git a/src/catalog/src/information_schema/region_peers.rs b/src/catalog/src/information_schema/region_peers.rs
new file mode 100644
index 000000000000..882ad263092c
--- /dev/null
+++ b/src/catalog/src/information_schema/region_peers.rs
@@ -0,0 +1,279 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use core::pin::pin;
+use std::sync::{Arc, Weak};
+
+use arrow_schema::SchemaRef as ArrowSchemaRef;
+use common_catalog::consts::INFORMATION_SCHEMA_REGION_PEERS_TABLE_ID;
+use common_error::ext::BoxedError;
+use common_meta::rpc::router::RegionRoute;
+use common_query::physical_plan::TaskContext;
+use common_recordbatch::adapter::RecordBatchStreamAdapter;
+use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
+use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
+use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
+use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
+use datatypes::value::Value;
+use datatypes::vectors::{Int64VectorBuilder, StringVectorBuilder, UInt64VectorBuilder};
+use futures::{StreamExt, TryStreamExt};
+use snafu::{OptionExt, ResultExt};
+use store_api::storage::{ScanRequest, TableId};
+use table::metadata::TableType;
+
+use super::REGION_PEERS;
+use crate::error::{
+ CreateRecordBatchSnafu, FindRegionRoutesSnafu, InternalSnafu, Result,
+ UpgradeWeakCatalogManagerRefSnafu,
+};
+use crate::information_schema::{InformationTable, Predicates};
+use crate::kvbackend::KvBackendCatalogManager;
+use crate::CatalogManager;
+
+const REGION_ID: &str = "region_id";
+const PEER_ID: &str = "peer_id";
+const PEER_ADDR: &str = "peer_addr";
+const IS_LEADER: &str = "is_leader";
+const STATUS: &str = "status";
+const DOWN_SECONDS: &str = "down_seconds";
+const INIT_CAPACITY: usize = 42;
+
+/// The `REGION_PEERS` table provides information about the region distribution and routes, including the fields:
+///
+/// - `region_id`: the region id
+/// - `peer_id`: the region storage datanode peer id
+/// - `peer_addr`: the region storage datanode peer address
+/// - `is_leader`: whether the peer is the leader
+/// - `status`: the region status, `ALIVE` or `DOWNGRADED`.
+/// - `down_seconds`: the duration of being offline, in seconds.
+///
+pub(super) struct InformationSchemaRegionPeers {
+ schema: SchemaRef,
+ catalog_name: String,
+ catalog_manager: Weak<dyn CatalogManager>,
+}
+
+impl InformationSchemaRegionPeers {
+ pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
+ Self {
+ schema: Self::schema(),
+ catalog_name,
+ catalog_manager,
+ }
+ }
+
+ pub(crate) fn schema() -> SchemaRef {
+ Arc::new(Schema::new(vec![
+ ColumnSchema::new(REGION_ID, ConcreteDataType::uint64_datatype(), false),
+ ColumnSchema::new(PEER_ID, ConcreteDataType::uint64_datatype(), true),
+ ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
+ ColumnSchema::new(IS_LEADER, ConcreteDataType::string_datatype(), true),
+ ColumnSchema::new(STATUS, ConcreteDataType::string_datatype(), true),
+ ColumnSchema::new(DOWN_SECONDS, ConcreteDataType::int64_datatype(), true),
+ ]))
+ }
+
+ fn builder(&self) -> InformationSchemaRegionPeersBuilder {
+ InformationSchemaRegionPeersBuilder::new(
+ self.schema.clone(),
+ self.catalog_name.clone(),
+ self.catalog_manager.clone(),
+ )
+ }
+}
+
+impl InformationTable for InformationSchemaRegionPeers {
+ fn table_id(&self) -> TableId {
+ INFORMATION_SCHEMA_REGION_PEERS_TABLE_ID
+ }
+
+ fn table_name(&self) -> &'static str {
+ REGION_PEERS
+ }
+
+ fn schema(&self) -> SchemaRef {
+ self.schema.clone()
+ }
+
+ fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
+ let schema = self.schema.arrow_schema().clone();
+ let mut builder = self.builder();
+ let stream = Box::pin(DfRecordBatchStreamAdapter::new(
+ schema,
+ futures::stream::once(async move {
+ builder
+ .make_region_peers(Some(request))
+ .await
+ .map(|x| x.into_df_record_batch())
+ .map_err(Into::into)
+ }),
+ ));
+ Ok(Box::pin(
+ RecordBatchStreamAdapter::try_new(stream)
+ .map_err(BoxedError::new)
+ .context(InternalSnafu)?,
+ ))
+ }
+}
+
+struct InformationSchemaRegionPeersBuilder {
+ schema: SchemaRef,
+ catalog_name: String,
+ catalog_manager: Weak<dyn CatalogManager>,
+
+ region_ids: UInt64VectorBuilder,
+ peer_ids: UInt64VectorBuilder,
+ peer_addrs: StringVectorBuilder,
+ is_leaders: StringVectorBuilder,
+ statuses: StringVectorBuilder,
+ down_seconds: Int64VectorBuilder,
+}
+
+impl InformationSchemaRegionPeersBuilder {
+ fn new(
+ schema: SchemaRef,
+ catalog_name: String,
+ catalog_manager: Weak<dyn CatalogManager>,
+ ) -> Self {
+ Self {
+ schema,
+ catalog_name,
+ catalog_manager,
+ region_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
+ peer_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
+ peer_addrs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ is_leaders: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ statuses: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ down_seconds: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+ }
+ }
+
+ /// Construct the `information_schema.region_peers` virtual table
+ async fn make_region_peers(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
+ let catalog_name = self.catalog_name.clone();
+ let catalog_manager = self
+ .catalog_manager
+ .upgrade()
+ .context(UpgradeWeakCatalogManagerRefSnafu)?;
+
+ let partition_manager = catalog_manager
+ .as_any()
+ .downcast_ref::<KvBackendCatalogManager>()
+ .map(|catalog_manager| catalog_manager.partition_manager());
+
+ let predicates = Predicates::from_scan_request(&request);
+
+ for schema_name in catalog_manager.schema_names(&catalog_name).await? {
+ let table_id_stream = catalog_manager
+ .tables(&catalog_name, &schema_name)
+ .await
+ .try_filter_map(|t| async move {
+ let table_info = t.table_info();
+ if table_info.table_type == TableType::Temporary {
+ Ok(None)
+ } else {
+ Ok(Some(table_info.ident.table_id))
+ }
+ });
+
+ const BATCH_SIZE: usize = 128;
+
+ // Split table ids into chunks
+ let mut table_id_chunks = pin!(table_id_stream.ready_chunks(BATCH_SIZE));
+
+ while let Some(table_ids) = table_id_chunks.next().await {
+ let table_ids = table_ids.into_iter().collect::<Result<Vec<_>>>()?;
+
+ let table_routes = if let Some(partition_manager) = &partition_manager {
+ partition_manager
+ .find_region_routes_batch(&table_ids)
+ .await
+ .context(FindRegionRoutesSnafu)?
+ } else {
+ table_ids.into_iter().map(|id| (id, vec![])).collect()
+ };
+
+ for routes in table_routes.values() {
+ self.add_region_peers(&predicates, routes);
+ }
+ }
+ }
+
+ self.finish()
+ }
+
+ fn add_region_peers(&mut self, predicates: &Predicates, routes: &[RegionRoute]) {
+ for route in routes {
+ let region_id = route.region.id.as_u64();
+ let peer_id = route.leader_peer.clone().map(|p| p.id);
+ let peer_addr = route.leader_peer.clone().map(|p| p.addr);
+ let status = if let Some(status) = route.leader_status {
+ Some(status.as_ref().to_string())
+ } else {
+ // Alive by default
+ Some("ALIVE".to_string())
+ };
+
+ let row = [(REGION_ID, &Value::from(region_id))];
+
+ if !predicates.eval(&row) {
+ return;
+ }
+
+ // TODO(dennis): adds followers.
+ self.region_ids.push(Some(region_id));
+ self.peer_ids.push(peer_id);
+ self.peer_addrs.push(peer_addr.as_deref());
+ self.is_leaders.push(Some("Yes"));
+ self.statuses.push(status.as_deref());
+ self.down_seconds
+ .push(route.leader_down_millis().map(|m| m / 1000));
+ }
+ }
+
+ fn finish(&mut self) -> Result<RecordBatch> {
+ let columns: Vec<VectorRef> = vec![
+ Arc::new(self.region_ids.finish()),
+ Arc::new(self.peer_ids.finish()),
+ Arc::new(self.peer_addrs.finish()),
+ Arc::new(self.is_leaders.finish()),
+ Arc::new(self.statuses.finish()),
+ Arc::new(self.down_seconds.finish()),
+ ];
+ RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
+ }
+}
+
+impl DfPartitionStream for InformationSchemaRegionPeers {
+ fn schema(&self) -> &ArrowSchemaRef {
+ self.schema.arrow_schema()
+ }
+
+ fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
+ let schema = self.schema.arrow_schema().clone();
+ let mut builder = self.builder();
+ Box::pin(DfRecordBatchStreamAdapter::new(
+ schema,
+ futures::stream::once(async move {
+ builder
+ .make_region_peers(None)
+ .await
+ .map(|x| x.into_df_record_batch())
+ .map_err(Into::into)
+ }),
+ ))
+ }
+}
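
Similarly, a minimal sketch for the new REGION_PEERS table, which is registered under the name `greptime_region_peers` (see the table_names.rs change below). The columns follow the schema above; the status value in the predicate is illustrative and matches the `ALIVE` / `DOWNGRADED` statuses documented for this table.

SELECT region_id, peer_id, peer_addr, is_leader, status, down_seconds
FROM information_schema.greptime_region_peers
WHERE status = 'DOWNGRADED';
-- down_seconds is derived from leader_down_since via leader_down_millis()
-- (see the common_meta rpc/router.rs change below).
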
diff --git a/src/catalog/src/information_schema/schemata.rs b/src/catalog/src/information_schema/schemata.rs
index eddfb142cc77..9f435878658e 100644
--- a/src/catalog/src/information_schema/schemata.rs
+++ b/src/catalog/src/information_schema/schemata.rs
@@ -41,6 +41,7 @@ const CATALOG_NAME: &str = "catalog_name";
const SCHEMA_NAME: &str = "schema_name";
const DEFAULT_CHARACTER_SET_NAME: &str = "default_character_set_name";
const DEFAULT_COLLATION_NAME: &str = "default_collation_name";
+const INIT_CAPACITY: usize = 42;
/// The `information_schema.schemata` table implementation.
pub(super) struct InformationSchemaSchemata {
@@ -144,11 +145,11 @@ impl InformationSchemaSchemataBuilder {
schema,
catalog_name,
catalog_manager,
- catalog_names: StringVectorBuilder::with_capacity(42),
- schema_names: StringVectorBuilder::with_capacity(42),
- charset_names: StringVectorBuilder::with_capacity(42),
- collation_names: StringVectorBuilder::with_capacity(42),
- sql_paths: StringVectorBuilder::with_capacity(42),
+ catalog_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ charset_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ collation_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ sql_paths: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}
@@ -162,13 +163,6 @@ impl InformationSchemaSchemataBuilder {
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
- if !catalog_manager
- .schema_exists(&catalog_name, &schema_name)
- .await?
- {
- continue;
- }
-
self.add_schema(&predicates, &catalog_name, &schema_name);
}
diff --git a/src/catalog/src/information_schema/table_names.rs b/src/catalog/src/information_schema/table_names.rs
index e47b96d146ec..32faa00e0370 100644
--- a/src/catalog/src/information_schema/table_names.rs
+++ b/src/catalog/src/information_schema/table_names.rs
@@ -39,3 +39,5 @@ pub const TRIGGERS: &str = "triggers";
pub const GLOBAL_STATUS: &str = "global_status";
pub const SESSION_STATUS: &str = "session_status";
pub const RUNTIME_METRICS: &str = "runtime_metrics";
+pub const PARTITIONS: &str = "partitions";
+pub const REGION_PEERS: &str = "greptime_region_peers";
diff --git a/src/catalog/src/information_schema/tables.rs b/src/catalog/src/information_schema/tables.rs
index 454dbf9b8871..f55abce61f1f 100644
--- a/src/catalog/src/information_schema/tables.rs
+++ b/src/catalog/src/information_schema/tables.rs
@@ -45,6 +45,7 @@ const TABLE_NAME: &str = "table_name";
const TABLE_TYPE: &str = "table_type";
const TABLE_ID: &str = "table_id";
const ENGINE: &str = "engine";
+const INIT_CAPACITY: usize = 42;
pub(super) struct InformationSchemaTables {
schema: SchemaRef,
@@ -141,12 +142,12 @@ impl InformationSchemaTablesBuilder {
schema,
catalog_name,
catalog_manager,
- catalog_names: StringVectorBuilder::with_capacity(42),
- schema_names: StringVectorBuilder::with_capacity(42),
- table_names: StringVectorBuilder::with_capacity(42),
- table_types: StringVectorBuilder::with_capacity(42),
- table_ids: UInt32VectorBuilder::with_capacity(42),
- engines: StringVectorBuilder::with_capacity(42),
+ catalog_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ schema_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ table_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ table_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
+ engines: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}
@@ -160,13 +161,6 @@ impl InformationSchemaTablesBuilder {
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
- if !catalog_manager
- .schema_exists(&catalog_name, &schema_name)
- .await?
- {
- continue;
- }
-
let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
while let Some(table) = stream.try_next().await? {
diff --git a/src/cmd/src/cli/bench.rs b/src/cmd/src/cli/bench.rs
index 6cd7d86a1642..60405e812819 100644
--- a/src/cmd/src/cli/bench.rs
+++ b/src/cmd/src/cli/bench.rs
@@ -156,6 +156,7 @@ fn create_region_routes(regions: Vec<RegionNumber>) -> Vec<RegionRoute> {
}),
follower_peers: vec![],
leader_status: None,
+ leader_down_since: None,
});
}
diff --git a/src/common/catalog/src/consts.rs b/src/common/catalog/src/consts.rs
index 2fd9aa92a89f..ddf834dbc9cd 100644
--- a/src/common/catalog/src/consts.rs
+++ b/src/common/catalog/src/consts.rs
@@ -82,6 +82,10 @@ pub const INFORMATION_SCHEMA_GLOBAL_STATUS_TABLE_ID: u32 = 25;
pub const INFORMATION_SCHEMA_SESSION_STATUS_TABLE_ID: u32 = 26;
/// id for information_schema.RUNTIME_METRICS
pub const INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID: u32 = 27;
+/// id for information_schema.PARTITIONS
+pub const INFORMATION_SCHEMA_PARTITIONS_TABLE_ID: u32 = 28;
+/// id for information_schema.REGION_PEERS
+pub const INFORMATION_SCHEMA_REGION_PEERS_TABLE_ID: u32 = 29;
/// ----- End of information_schema tables -----
pub const MITO_ENGINE: &str = "mito";
diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs
index 702061c0b0a1..cf7ff84e92ee 100644
--- a/src/common/meta/src/key.rs
+++ b/src/common/meta/src/key.rs
@@ -844,6 +844,7 @@ mod tests {
use std::sync::Arc;
use bytes::Bytes;
+ use common_time::util::current_time_millis;
use futures::TryStreamExt;
use table::metadata::{RawTableInfo, TableInfo};
@@ -910,6 +911,7 @@ mod tests {
leader_peer: Some(Peer::new(datanode, "a2")),
follower_peers: vec![],
leader_status: None,
+ leader_down_since: None,
}
}
@@ -1263,6 +1265,7 @@ mod tests {
leader_peer: Some(Peer::new(datanode, "a2")),
leader_status: Some(RegionStatus::Downgraded),
follower_peers: vec![],
+ leader_down_since: Some(current_time_millis()),
},
RegionRoute {
region: Region {
@@ -1274,6 +1277,7 @@ mod tests {
leader_peer: Some(Peer::new(datanode, "a1")),
leader_status: None,
follower_peers: vec![],
+ leader_down_since: None,
},
];
let table_info: RawTableInfo =
@@ -1314,10 +1318,18 @@ mod tests {
updated_route_value.region_routes().unwrap()[0].leader_status,
Some(RegionStatus::Downgraded)
);
+
+ assert!(updated_route_value.region_routes().unwrap()[0]
+ .leader_down_since
+ .is_some());
+
assert_eq!(
updated_route_value.region_routes().unwrap()[1].leader_status,
Some(RegionStatus::Downgraded)
);
+ assert!(updated_route_value.region_routes().unwrap()[1]
+ .leader_down_since
+ .is_some());
}
async fn assert_datanode_table(
diff --git a/src/common/meta/src/key/table_route.rs b/src/common/meta/src/key/table_route.rs
index 270707d945c2..7886dc799787 100644
--- a/src/common/meta/src/key/table_route.rs
+++ b/src/common/meta/src/key/table_route.rs
@@ -457,7 +457,7 @@ mod tests {
let new_raw_v = format!("{:?}", v);
assert_eq!(
new_raw_v,
- r#"Physical(PhysicalTableRouteValue { region_routes: [RegionRoute { region: Region { id: 1(0, 1), name: "r1", partition: None, attrs: {} }, leader_peer: Some(Peer { id: 2, addr: "a2" }), follower_peers: [], leader_status: None }, RegionRoute { region: Region { id: 1(0, 1), name: "r1", partition: None, attrs: {} }, leader_peer: Some(Peer { id: 2, addr: "a2" }), follower_peers: [], leader_status: None }], version: 0 })"#
+ r#"Physical(PhysicalTableRouteValue { region_routes: [RegionRoute { region: Region { id: 1(0, 1), name: "r1", partition: None, attrs: {} }, leader_peer: Some(Peer { id: 2, addr: "a2" }), follower_peers: [], leader_status: None, leader_down_since: None }, RegionRoute { region: Region { id: 1(0, 1), name: "r1", partition: None, attrs: {} }, leader_peer: Some(Peer { id: 2, addr: "a2" }), follower_peers: [], leader_status: None, leader_down_since: None }], version: 0 })"#
);
}
}
diff --git a/src/common/meta/src/rpc/router.rs b/src/common/meta/src/rpc/router.rs
index b5db5014fcec..31be66f64954 100644
--- a/src/common/meta/src/rpc/router.rs
+++ b/src/common/meta/src/rpc/router.rs
@@ -18,11 +18,13 @@ use api::v1::meta::{
Partition as PbPartition, Peer as PbPeer, Region as PbRegion, Table as PbTable,
TableRoute as PbTableRoute,
};
+use common_time::util::current_time_millis;
use derive_builder::Builder;
use serde::ser::SerializeSeq;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use snafu::OptionExt;
use store_api::storage::{RegionId, RegionNumber};
+use strum::AsRefStr;
use crate::error::{self, Result};
use crate::key::RegionDistribution;
@@ -204,6 +206,7 @@ impl TableRoute {
leader_peer,
follower_peers,
leader_status: None,
+ leader_down_since: None,
});
}
@@ -258,10 +261,25 @@ pub struct RegionRoute {
#[builder(setter(into, strip_option), default)]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub leader_status: Option<RegionStatus>,
+ /// The start time when the leader is in `Downgraded` status.
+ #[serde(default)]
+ #[builder(default = "self.default_leader_down_since()")]
+ pub leader_down_since: Option<i64>,
+}
+
+impl RegionRouteBuilder {
+ fn default_leader_down_since(&self) -> Option<i64> {
+ match self.leader_status {
+ Some(Some(RegionStatus::Downgraded)) => Some(current_time_millis()),
+ _ => None,
+ }
+ }
}
/// The Status of the [Region].
-#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq)]
+/// TODO(dennis): It's better to add more fine-grained statuses such as `PENDING` etc.
+#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, AsRefStr)]
+#[strum(serialize_all = "UPPERCASE")]
pub enum RegionStatus {
/// The following cases in which the [Region] will be downgraded.
///
@@ -292,15 +310,34 @@ impl RegionRoute {
/// **Notes:** Meta Server will stop renewing the lease for the downgraded [Region].
///
pub fn downgrade_leader(&mut self) {
+ self.leader_down_since = Some(current_time_millis());
self.leader_status = Some(RegionStatus::Downgraded)
}
+ /// Returns how long the leader has been in `Downgraded` status.
+ pub fn leader_down_millis(&self) -> Option<i64> {
+ self.leader_down_since
+ .map(|start| current_time_millis() - start)
+ }
+
/// Sets the leader status.
///
/// Returns true if updated.
pub fn set_leader_status(&mut self, status: Option<RegionStatus>) -> bool {
let updated = self.leader_status != status;
+ match (status, updated) {
+ (Some(RegionStatus::Downgraded), true) => {
+ self.leader_down_since = Some(current_time_millis());
+ }
+ (Some(RegionStatus::Downgraded), false) => {
+ // Do nothing if leader is still in `Downgraded` status.
+ }
+ _ => {
+ self.leader_down_since = None;
+ }
+ }
+
self.leader_status = status;
updated
}
@@ -441,6 +478,7 @@ mod tests {
leader_peer: Some(Peer::new(1, "a1")),
follower_peers: vec![Peer::new(2, "a2"), Peer::new(3, "a3")],
leader_status: None,
+ leader_down_since: None,
};
assert!(!region_route.is_leader_downgraded());
@@ -462,6 +500,7 @@ mod tests {
leader_peer: Some(Peer::new(1, "a1")),
follower_peers: vec![Peer::new(2, "a2"), Peer::new(3, "a3")],
leader_status: None,
+ leader_down_since: None,
};
let input = r#"{"region":{"id":2,"name":"r2","partition":null,"attrs":{}},"leader_peer":{"id":1,"addr":"a1"},"follower_peers":[{"id":2,"addr":"a2"},{"id":3,"addr":"a3"}]}"#;
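
The `set_leader_status` change above couples the status transition with `leader_down_since` bookkeeping: the timestamp is recorded on the first transition into `Downgraded`, left untouched while the leader stays `Downgraded`, and cleared for any other status. Below is a minimal, self-contained sketch of that rule only, assuming simplified stand-in types (`Route`, `Status`) and `std::time` in place of `common_time::util::current_time_millis`; it is an illustration of the transition logic, not the crate's actual `RegionRoute`.

use std::time::{SystemTime, UNIX_EPOCH};

fn current_time_millis() -> i64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_millis() as i64)
        .unwrap_or(0)
}

#[derive(Debug, Clone, Copy, PartialEq)]
enum Status {
    Downgraded,
}

#[derive(Debug, Default)]
struct Route {
    leader_status: Option<Status>,
    leader_down_since: Option<i64>,
}

impl Route {
    /// Sets the leader status; returns true if it actually changed.
    fn set_leader_status(&mut self, status: Option<Status>) -> bool {
        let updated = self.leader_status != status;
        match (status, updated) {
            // First transition into `Downgraded`: record when it happened.
            (Some(Status::Downgraded), true) => {
                self.leader_down_since = Some(current_time_millis());
            }
            // Still `Downgraded`: keep the original timestamp.
            (Some(Status::Downgraded), false) => {}
            // Any other status clears the timestamp.
            _ => self.leader_down_since = None,
        }
        self.leader_status = status;
        updated
    }

    /// How long the leader has been downgraded, if it is.
    fn leader_down_millis(&self) -> Option<i64> {
        self.leader_down_since
            .map(|start| current_time_millis() - start)
    }
}

fn main() {
    let mut route = Route::default();
    assert!(route.set_leader_status(Some(Status::Downgraded)));
    let first = route.leader_down_since;
    // Re-applying the same status must not reset the timestamp.
    assert!(!route.set_leader_status(Some(Status::Downgraded)));
    assert_eq!(first, route.leader_down_since);
    assert!(route.leader_down_millis().is_some());
    // Clearing the status clears the timestamp too.
    route.set_leader_status(None);
    assert!(route.leader_down_since.is_none());
}
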
diff --git a/src/meta-srv/src/handler/region_lease_handler.rs b/src/meta-srv/src/handler/region_lease_handler.rs
index eb792cf9ecd2..0a8f917bf6f2 100644
--- a/src/meta-srv/src/handler/region_lease_handler.rs
+++ b/src/meta-srv/src/handler/region_lease_handler.rs
@@ -296,6 +296,7 @@ mod test {
leader_peer: Some(peer.clone()),
follower_peers: vec![follower_peer.clone()],
leader_status: Some(RegionStatus::Downgraded),
+ leader_down_since: Some(1),
},
RegionRoute {
region: Region::new_test(another_region_id),
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
index 62d7f92320fe..2c5a5f61d0d6 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
@@ -190,6 +190,7 @@ mod tests {
use common_meta::peer::Peer;
use common_meta::region_keeper::MemoryRegionKeeper;
use common_meta::rpc::router::{Region, RegionRoute, RegionStatus};
+ use common_time::util::current_time_millis;
use store_api::storage::RegionId;
use crate::error::Error;
@@ -285,6 +286,7 @@ mod tests {
leader_peer: Some(Peer::empty(1)),
follower_peers: vec![Peer::empty(2), Peer::empty(3)],
leader_status: Some(RegionStatus::Downgraded),
+ leader_down_since: Some(current_time_millis()),
}];
env.create_physical_table_metadata(table_info, region_routes)
@@ -296,6 +298,7 @@ mod tests {
.unwrap();
assert!(!new_region_routes[0].is_leader_downgraded());
+ assert!(new_region_routes[0].leader_down_since.is_none());
assert_eq!(new_region_routes[0].follower_peers, vec![Peer::empty(3)]);
assert_eq!(new_region_routes[0].leader_peer.as_ref().unwrap().id, 2);
}
@@ -316,6 +319,7 @@ mod tests {
leader_peer: Some(Peer::empty(1)),
follower_peers: vec![Peer::empty(5), Peer::empty(3)],
leader_status: Some(RegionStatus::Downgraded),
+ leader_down_since: Some(current_time_millis()),
},
RegionRoute {
region: Region::new_test(RegionId::new(table_id, 2)),
@@ -377,6 +381,7 @@ mod tests {
leader_peer: Some(leader_peer),
follower_peers: vec![Peer::empty(2), Peer::empty(3)],
leader_status: None,
+ leader_down_since: None,
}];
env.create_physical_table_metadata(table_info, region_routes)
@@ -400,6 +405,7 @@ mod tests {
leader_peer: Some(candidate_peer),
follower_peers: vec![Peer::empty(2), Peer::empty(3)],
leader_status: None,
+ leader_down_since: None,
}];
env.create_physical_table_metadata(table_info, region_routes)
@@ -423,6 +429,7 @@ mod tests {
leader_peer: Some(candidate_peer),
follower_peers: vec![Peer::empty(2), Peer::empty(3)],
leader_status: Some(RegionStatus::Downgraded),
+ leader_down_since: None,
}];
env.create_physical_table_metadata(table_info, region_routes)
diff --git a/src/meta-srv/src/test_util.rs b/src/meta-srv/src/test_util.rs
index 3013ac9ad745..4d021fae97fa 100644
--- a/src/meta-srv/src/test_util.rs
+++ b/src/meta-srv/src/test_util.rs
@@ -50,6 +50,7 @@ pub(crate) fn new_region_route(region_id: u64, peers: &[Peer], leader_node: u64)
leader_peer,
follower_peers: vec![],
leader_status: None,
+ leader_down_since: None,
}
}
@@ -132,6 +133,7 @@ pub(crate) async fn prepare_table_region_and_info_value(
}),
follower_peers: vec![],
leader_status: None,
+ leader_down_since: None,
};
// Region distribution:
diff --git a/src/operator/src/tests/partition_manager.rs b/src/operator/src/tests/partition_manager.rs
index dd2a044b51c3..859090a753b8 100644
--- a/src/operator/src/tests/partition_manager.rs
+++ b/src/operator/src/tests/partition_manager.rs
@@ -133,6 +133,7 @@ pub(crate) async fn create_partition_rule_manager(
leader_peer: Some(Peer::new(3, "")),
follower_peers: vec![],
leader_status: None,
+ leader_down_since: None,
},
RegionRoute {
region: Region {
@@ -151,6 +152,7 @@ pub(crate) async fn create_partition_rule_manager(
leader_peer: Some(Peer::new(2, "")),
follower_peers: vec![],
leader_status: None,
+ leader_down_since: None,
},
RegionRoute {
region: Region {
@@ -169,6 +171,7 @@ pub(crate) async fn create_partition_rule_manager(
leader_peer: Some(Peer::new(1, "")),
follower_peers: vec![],
leader_status: None,
+ leader_down_since: None,
},
]),
region_wal_options.clone(),
@@ -200,6 +203,7 @@ pub(crate) async fn create_partition_rule_manager(
leader_peer: None,
follower_peers: vec![],
leader_status: None,
+ leader_down_since: None,
},
RegionRoute {
region: Region {
@@ -221,6 +225,7 @@ pub(crate) async fn create_partition_rule_manager(
leader_peer: None,
follower_peers: vec![],
leader_status: None,
+ leader_down_since: None,
},
RegionRoute {
region: Region {
@@ -239,6 +244,7 @@ pub(crate) async fn create_partition_rule_manager(
leader_peer: None,
follower_peers: vec![],
leader_status: None,
+ leader_down_since: None,
},
]),
region_wal_options,
diff --git a/src/partition/Cargo.toml b/src/partition/Cargo.toml
index d6a747db66ba..4fadc02b590c 100644
--- a/src/partition/Cargo.toml
+++ b/src/partition/Cargo.toml
@@ -17,6 +17,7 @@ datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
datatypes.workspace = true
+itertools.workspace = true
lazy_static.workspace = true
meta-client.workspace = true
moka = { workspace = true, features = ["future"] }
diff --git a/src/partition/src/manager.rs b/src/partition/src/manager.rs
index 4e424b595a0e..ab757a3d2edf 100644
--- a/src/partition/src/manager.rs
+++ b/src/partition/src/manager.rs
@@ -65,7 +65,7 @@ impl PartitionRuleManager {
}
}
- async fn find_region_routes(&self, table_id: TableId) -> Result<Vec<RegionRoute>> {
+ pub async fn find_region_routes(&self, table_id: TableId) -> Result<Vec<RegionRoute>> {
let (_, route) = self
.table_route_manager
.get_physical_table_route(table_id)
@@ -74,6 +74,29 @@ impl PartitionRuleManager {
Ok(route.region_routes)
}
+ pub async fn find_region_routes_batch(
+ &self,
+ table_ids: &[TableId],
+ ) -> Result<HashMap<TableId, Vec<RegionRoute>>> {
+ let table_routes = self
+ .table_route_manager
+ .batch_get(table_ids)
+ .await
+ .context(error::TableRouteManagerSnafu)?;
+
+ let mut table_region_routes = HashMap::with_capacity(table_routes.len());
+
+ for (table_id, table_route) in table_routes {
+ let region_routes = table_route
+ .region_routes()
+ .context(error::TableRouteManagerSnafu)?
+ .clone();
+ table_region_routes.insert(table_id, region_routes);
+ }
+
+ Ok(table_region_routes)
+ }
+
pub async fn find_table_partitions(&self, table_id: TableId) -> Result<Vec<PartitionInfo>> {
let region_routes = self.find_region_routes(table_id).await?;
ensure!(
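
The new `find_region_routes_batch` above fans a single batched route lookup out into a per-table map. A dependency-free sketch of just that shaping step, using hypothetical stand-ins for `TableRoute`/`RegionRoute` and no snafu error handling, might look like:

use std::collections::HashMap;

type TableId = u32;

#[derive(Debug, Clone)]
struct RegionRoute {
    region_number: u32,
}

#[derive(Debug)]
struct TableRoute {
    region_routes: Vec<RegionRoute>,
}

/// Turn the result of a batched route lookup into a per-table map,
/// cloning the routes so callers own their copy.
fn routes_by_table(batch: Vec<(TableId, TableRoute)>) -> HashMap<TableId, Vec<RegionRoute>> {
    let mut out = HashMap::with_capacity(batch.len());
    for (table_id, route) in batch {
        out.insert(table_id, route.region_routes.clone());
    }
    out
}

fn main() {
    let batch = vec![
        (1, TableRoute { region_routes: vec![RegionRoute { region_number: 0 }] }),
        (2, TableRoute { region_routes: vec![] }),
    ];
    let map = routes_by_table(batch);
    assert_eq!(map[&1].len(), 1);
    assert_eq!(map[&1][0].region_number, 0);
    assert!(map[&2].is_empty());
}
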
diff --git a/src/partition/src/partition.rs b/src/partition/src/partition.rs
index 1d63277a9fd1..f25ca63da2ae 100644
--- a/src/partition/src/partition.rs
+++ b/src/partition/src/partition.rs
@@ -13,12 +13,13 @@
// limitations under the License.
use std::any::Any;
-use std::fmt::Debug;
+use std::fmt::{Debug, Display, Formatter};
use std::sync::Arc;
use common_meta::rpc::router::Partition as MetaPartition;
use datafusion_expr::Operator;
use datatypes::prelude::Value;
+use itertools::Itertools;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use store_api::storage::RegionNumber;
@@ -56,6 +57,29 @@ pub struct PartitionDef {
partition_bounds: Vec<PartitionBound>,
}
+impl Display for PartitionBound {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ match self {
+ Self::Value(v) => write!(f, "{}", v),
+ Self::MaxValue => write!(f, "MAXVALUE"),
+ }
+ }
+}
+
+impl Display for PartitionDef {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ write!(
+ f,
+ "({}) VALUES LESS THAN ({})",
+ self.partition_columns.iter().join(", "),
+ self.partition_bounds
+ .iter()
+ .map(|b| format!("{b}"))
+ .join(", ")
+ )
+ }
+}
+
impl PartitionDef {
pub fn new(partition_columns: Vec<String>, partition_bounds: Vec<PartitionBound>) -> Self {
Self {
@@ -162,6 +186,8 @@ mod tests {
PartitionBound::Value(1_i32.into()),
],
};
+ assert_eq!("(a, b) VALUES LESS THAN (MAXVALUE, 1)", def.to_string());
+
let partition: MetaPartition = def.try_into().unwrap();
assert_eq!(
r#"{"column_list":["a","b"],"value_list":["\"MaxValue\"","{\"Value\":{\"Int32\":1}}"]}"#,
diff --git a/tests/cases/distributed/create/partition.result b/tests/cases/distributed/create/partition.result
new file mode 100644
index 000000000000..9164096e3ed8
--- /dev/null
+++ b/tests/cases/distributed/create/partition.result
@@ -0,0 +1,40 @@
+CREATE TABLE my_table (
+ a INT PRIMARY KEY,
+ b STRING,
+ ts TIMESTAMP TIME INDEX,
+)
+PARTITION BY RANGE COLUMNS (a) (
+ PARTITION p0 VALUES LESS THAN (10),
+ PARTITION p1 VALUES LESS THAN (20),
+ PARTITION p2 VALUES LESS THAN (MAXVALUE),
+);
+
+Affected Rows: 0
+
+-- SQLNESS REPLACE (\d{13}) ID
+SELECT table_catalog, table_schema, table_name, partition_name, partition_expression, greptime_partition_id from information_schema.partitions WHERE table_name = 'my_table' ORDER BY table_catalog, table_schema, table_name, partition_name;
+
++---------------+--------------+------------+----------------+---------------------------------+-----------------------+
+| table_catalog | table_schema | table_name | partition_name | partition_expression | greptime_partition_id |
++---------------+--------------+------------+----------------+---------------------------------+-----------------------+
+| greptime | public | my_table | p0 | (a) VALUES LESS THAN (10) | ID |
+| greptime | public | my_table | p1 | (a) VALUES LESS THAN (20) | ID |
+| greptime | public | my_table | p2 | (a) VALUES LESS THAN (MAXVALUE) | ID |
++---------------+--------------+------------+----------------+---------------------------------+-----------------------+
+
+-- SQLNESS REPLACE (\d{13}) REGION_ID
+-- SQLNESS REPLACE (\d{1}) PEER_ID
+SELECT region_id, peer_id, is_leader, status FROM information_schema.greptime_region_peers ORDER BY peer_id;
+
++---------------+---------+-----------+--------+
+| region_id | peer_id | is_leader | status |
++---------------+---------+-----------+--------+
+| REGION_ID | PEER_ID | Yes | ALIVE |
+| REGION_ID | PEER_ID | Yes | ALIVE |
+| REGION_ID | PEER_ID | Yes | ALIVE |
++---------------+---------+-----------+--------+
+
+DROP TABLE my_table;
+
+Affected Rows: 0
+
diff --git a/tests/cases/distributed/create/partition.sql b/tests/cases/distributed/create/partition.sql
new file mode 100644
index 000000000000..df5f522f425b
--- /dev/null
+++ b/tests/cases/distributed/create/partition.sql
@@ -0,0 +1,19 @@
+CREATE TABLE my_table (
+ a INT PRIMARY KEY,
+ b STRING,
+ ts TIMESTAMP TIME INDEX,
+)
+PARTITION BY RANGE COLUMNS (a) (
+ PARTITION p0 VALUES LESS THAN (10),
+ PARTITION p1 VALUES LESS THAN (20),
+ PARTITION p2 VALUES LESS THAN (MAXVALUE),
+);
+
+-- SQLNESS REPLACE (\d{13}) ID
+SELECT table_catalog, table_schema, table_name, partition_name, partition_expression, greptime_partition_id from information_schema.partitions WHERE table_name = 'my_table' ORDER BY table_catalog, table_schema, table_name, partition_name;
+
+-- SQLNESS REPLACE (\d{13}) REGION_ID
+-- SQLNESS REPLACE (\d{1}) PEER_ID
+SELECT region_id, peer_id, is_leader, status FROM information_schema.greptime_region_peers ORDER BY peer_id;
+
+DROP TABLE my_table;
diff --git a/tests/cases/standalone/common/show/show_databases_tables.result b/tests/cases/standalone/common/show/show_databases_tables.result
index c61743d193a3..269983b36b33 100644
--- a/tests/cases/standalone/common/show/show_databases_tables.result
+++ b/tests/cases/standalone/common/show/show_databases_tables.result
@@ -34,6 +34,7 @@ show tables;
| key_column_usage |
| optimizer_trace |
| parameters |
+| partitions |
| profiling |
| referential_constraints |
| routines |
diff --git a/tests/cases/standalone/common/system/information_schema.result b/tests/cases/standalone/common/system/information_schema.result
index 1d39d272930a..808b991dbaff 100644
--- a/tests/cases/standalone/common/system/information_schema.result
+++ b/tests/cases/standalone/common/system/information_schema.result
@@ -26,6 +26,7 @@ order by table_schema, table_name;
| greptime | information_schema | key_column_usage | LOCAL TEMPORARY | 16 | |
| greptime | information_schema | optimizer_trace | LOCAL TEMPORARY | 17 | |
| greptime | information_schema | parameters | LOCAL TEMPORARY | 18 | |
+| greptime | information_schema | partitions | LOCAL TEMPORARY | 28 | |
| greptime | information_schema | profiling | LOCAL TEMPORARY | 19 | |
| greptime | information_schema | referential_constraints | LOCAL TEMPORARY | 20 | |
| greptime | information_schema | routines | LOCAL TEMPORARY | 21 | |
@@ -182,6 +183,32 @@ select * from information_schema.columns order by table_schema, table_name, colu
| greptime | information_schema | parameters | specific_catalog | String | FIELD | | No | String | |
| greptime | information_schema | parameters | specific_name | String | FIELD | | No | String | |
| greptime | information_schema | parameters | specific_schema | String | FIELD | | No | String | |
+| greptime | information_schema | partitions | avg_row_length | Int64 | FIELD | | Yes | Int64 | |
+| greptime | information_schema | partitions | check_time | DateTime | FIELD | | Yes | DateTime | |
+| greptime | information_schema | partitions | checksum | Int64 | FIELD | | Yes | Int64 | |
+| greptime | information_schema | partitions | create_time | DateTime | FIELD | | Yes | DateTime | |
+| greptime | information_schema | partitions | data_free | Int64 | FIELD | | Yes | Int64 | |
+| greptime | information_schema | partitions | data_length | Int64 | FIELD | | Yes | Int64 | |
+| greptime | information_schema | partitions | greptime_partition_id | UInt64 | FIELD | | Yes | UInt64 | |
+| greptime | information_schema | partitions | index_length | Int64 | FIELD | | Yes | Int64 | |
+| greptime | information_schema | partitions | max_data_length | Int64 | FIELD | | Yes | Int64 | |
+| greptime | information_schema | partitions | nodegroup | String | FIELD | | Yes | String | |
+| greptime | information_schema | partitions | partition_comment | String | FIELD | | Yes | String | |
+| greptime | information_schema | partitions | partition_description | String | FIELD | | Yes | String | |
+| greptime | information_schema | partitions | partition_expression | String | FIELD | | Yes | String | |
+| greptime | information_schema | partitions | partition_method | String | FIELD | | Yes | String | |
+| greptime | information_schema | partitions | partition_name | String | FIELD | | No | String | |
+| greptime | information_schema | partitions | partition_ordinal_position | Int64 | FIELD | | Yes | Int64 | |
+| greptime | information_schema | partitions | subpartition_expression | String | FIELD | | Yes | String | |
+| greptime | information_schema | partitions | subpartition_method | String | FIELD | | Yes | String | |
+| greptime | information_schema | partitions | subpartition_name | String | FIELD | | Yes | String | |
+| greptime | information_schema | partitions | subpartition_ordinal_position | Int64 | FIELD | | Yes | Int64 | |
+| greptime | information_schema | partitions | table_catalog | String | FIELD | | No | String | |
+| greptime | information_schema | partitions | table_name | String | FIELD | | No | String | |
+| greptime | information_schema | partitions | table_rows | Int64 | FIELD | | Yes | Int64 | |
+| greptime | information_schema | partitions | table_schema | String | FIELD | | No | String | |
+| greptime | information_schema | partitions | tablespace_name | String | FIELD | | Yes | String | |
+| greptime | information_schema | partitions | update_time | DateTime | FIELD | | Yes | DateTime | |
| greptime | information_schema | profiling | block_ops_in | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | profiling | block_ops_out | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | profiling | context_involuntary | Int64 | FIELD | | No | Int64 | |
@@ -528,7 +555,7 @@ select * from key_column_usage;
+--------------------+-------------------+-----------------+---------------+--------------+------------+-------------+------------------+-------------------------------+-------------------------+-----------------------+------------------------+
-- tables not implemented
-desc table COLUMN_PRIVILEGES;
+DESC TABLE COLUMN_PRIVILEGES;
+----------------+--------+-----+------+---------+---------------+
| Column | Type | Key | Null | Default | Semantic Type |
@@ -542,14 +569,14 @@ desc table COLUMN_PRIVILEGES;
| is_grantable | String | | NO | | FIELD |
+----------------+--------+-----+------+---------+---------------+
-select * from COLUMN_PRIVILEGES;
+SELECT * FROM COLUMN_PRIVILEGES;
+---------+---------------+--------------+------------+-------------+----------------+--------------+
| grantee | table_catalog | table_schema | table_name | column_name | privilege_type | is_grantable |
+---------+---------------+--------------+------------+-------------+----------------+--------------+
+---------+---------------+--------------+------------+-------------+----------------+--------------+
-desc table COLUMN_STATISTICS;
+DESC TABLE COLUMN_STATISTICS;
+-------------+--------+-----+------+---------+---------------+
| Column | Type | Key | Null | Default | Semantic Type |
@@ -560,14 +587,14 @@ desc table COLUMN_STATISTICS;
| histogram | String | | NO | | FIELD |
+-------------+--------+-----+------+---------+---------------+
-select * from COLUMN_STATISTICS;
+SELECT * FROM COLUMN_STATISTICS;
+-------------+------------+-------------+-----------+
| schema_name | table_name | column_name | histogram |
+-------------+------------+-------------+-----------+
+-------------+------------+-------------+-----------+
-select * from CHARACTER_SETS;
+SELECT * FROM CHARACTER_SETS;
+--------------------+----------------------+---------------+--------+
| character_set_name | default_collate_name | description | maxlen |
@@ -575,7 +602,7 @@ select * from CHARACTER_SETS;
| utf8 | utf8_bin | UTF-8 Unicode | 4 |
+--------------------+----------------------+---------------+--------+
-select * from COLLATIONS;
+SELECT * FROM COLLATIONS;
+----------------+--------------------+----+------------+-------------+---------+
| collation_name | character_set_name | id | is_default | is_compiled | sortlen |
@@ -583,7 +610,7 @@ select * from COLLATIONS;
| utf8_bin | utf8 | 1 | Yes | Yes | 1 |
+----------------+--------------------+----+------------+-------------+---------+
-select * from COLLATION_CHARACTER_SET_APPLICABILITY;
+SELECT * FROM COLLATION_CHARACTER_SET_APPLICABILITY;
+----------------+--------------------+
| collation_name | character_set_name |
@@ -591,7 +618,7 @@ select * from COLLATION_CHARACTER_SET_APPLICABILITY;
| utf8_bin | utf8 |
+----------------+--------------------+
-desc table CHECK_CONSTRAINTS;
+DESC TABLE CHECK_CONSTRAINTS;
+--------------------+--------+-----+------+---------+---------------+
| Column | Type | Key | Null | Default | Semantic Type |
@@ -602,14 +629,14 @@ desc table CHECK_CONSTRAINTS;
| check_clause | String | | NO | | FIELD |
+--------------------+--------+-----+------+---------+---------------+
-select * from CHECK_CONSTRAINTS;
+SELECT * FROM CHECK_CONSTRAINTS;
+--------------------+-------------------+-----------------+--------------+
| constraint_catalog | constraint_schema | constraint_name | check_clause |
+--------------------+-------------------+-----------------+--------------+
+--------------------+-------------------+-----------------+--------------+
-desc table RUNTIME_METRICS;
+DESC TABLE RUNTIME_METRICS;
+-------------+----------------------+-----+------+---------+---------------+
| Column | Type | Key | Null | Default | Semantic Type |
@@ -622,6 +649,19 @@ desc table RUNTIME_METRICS;
| timestamp | TimestampMillisecond | | NO | | FIELD |
+-------------+----------------------+-----+------+---------+---------------+
+DESC TABLE GREPTIME_REGION_PEERS;
+
++--------------+--------+-----+------+---------+---------------+
+| Column | Type | Key | Null | Default | Semantic Type |
++--------------+--------+-----+------+---------+---------------+
+| region_id | UInt64 | | NO | | FIELD |
+| peer_id | UInt64 | | YES | | FIELD |
+| peer_addr | String | | YES | | FIELD |
+| is_leader | String | | YES | | FIELD |
+| status | String | | YES | | FIELD |
+| down_seconds | Int64 | | YES | | FIELD |
++--------------+--------+-----+------+---------+---------------+
+
drop table my_db.foo;
Affected Rows: 0
diff --git a/tests/cases/standalone/common/system/information_schema.sql b/tests/cases/standalone/common/system/information_schema.sql
index 625391344aa3..76261d1c665b 100644
--- a/tests/cases/standalone/common/system/information_schema.sql
+++ b/tests/cases/standalone/common/system/information_schema.sql
@@ -97,25 +97,27 @@ desc table key_column_usage;
select * from key_column_usage;
-- tables not implemented
-desc table COLUMN_PRIVILEGES;
+DESC TABLE COLUMN_PRIVILEGES;
-select * from COLUMN_PRIVILEGES;
+SELECT * FROM COLUMN_PRIVILEGES;
-desc table COLUMN_STATISTICS;
+DESC TABLE COLUMN_STATISTICS;
-select * from COLUMN_STATISTICS;
+SELECT * FROM COLUMN_STATISTICS;
-select * from CHARACTER_SETS;
+SELECT * FROM CHARACTER_SETS;
-select * from COLLATIONS;
+SELECT * FROM COLLATIONS;
-select * from COLLATION_CHARACTER_SET_APPLICABILITY;
+SELECT * FROM COLLATION_CHARACTER_SET_APPLICABILITY;
-desc table CHECK_CONSTRAINTS;
+DESC TABLE CHECK_CONSTRAINTS;
-select * from CHECK_CONSTRAINTS;
+SELECT * FROM CHECK_CONSTRAINTS;
-desc table RUNTIME_METRICS;
+DESC TABLE RUNTIME_METRICS;
+
+DESC TABLE GREPTIME_REGION_PEERS;
drop table my_db.foo;
|
feat
|
impl partitions and region_peers information schema (#3278)
|
63d5a69a3163f9d66a3ec08edd8231793c6aa439
|
2025-02-13 10:02:24
|
Weny Xu
|
fix(query_range): skip data field on errors (#5520)
| false
|
diff --git a/src/servers/src/http/prometheus.rs b/src/servers/src/http/prometheus.rs
index d6f205ee9e62..5455591f1735 100644
--- a/src/servers/src/http/prometheus.rs
+++ b/src/servers/src/http/prometheus.rs
@@ -95,7 +95,7 @@ pub struct PromData {
pub result: PromQueryResult,
}
-#[derive(Debug, Serialize, Deserialize, PartialEq)]
+#[derive(Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(untagged)]
pub enum PrometheusResponse {
PromData(PromData),
@@ -106,6 +106,8 @@ pub enum PrometheusResponse {
BuildInfo(OwnedBuildInfo),
#[serde(skip_deserializing)]
ParseResult(promql_parser::parser::Expr),
+ #[default]
+ None,
}
impl PrometheusResponse {
@@ -144,11 +146,9 @@ impl PrometheusResponse {
}
}
}
-}
-impl Default for PrometheusResponse {
- fn default() -> Self {
- PrometheusResponse::PromData(Default::default())
+ pub fn is_none(&self) -> bool {
+ matches!(self, PrometheusResponse::None)
}
}
diff --git a/src/servers/src/http/result/prometheus_resp.rs b/src/servers/src/http/result/prometheus_resp.rs
index 8aa28011a274..eb076f88baee 100644
--- a/src/servers/src/http/result/prometheus_resp.rs
+++ b/src/servers/src/http/result/prometheus_resp.rs
@@ -42,6 +42,8 @@ use crate::http::prometheus::{
#[derive(Debug, Default, Serialize, Deserialize, PartialEq)]
pub struct PrometheusJsonResponse {
pub status: String,
+ #[serde(skip_serializing_if = "PrometheusResponse::is_none")]
+ #[serde(default)]
pub data: PrometheusResponse,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
@@ -90,7 +92,7 @@ impl PrometheusJsonResponse {
{
PrometheusJsonResponse {
status: "error".to_string(),
- data: PrometheusResponse::default(),
+ data: PrometheusResponse::None,
error: Some(reason.into()),
error_type: Some(error_type.to_string()),
warnings: None,
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index db8ee39e107b..d9e4c02db908 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -766,7 +766,7 @@ pub async fn test_prom_http_api(store_type: StorageType) {
.await;
assert_eq!(res.status(), StatusCode::BAD_REQUEST);
let data = res.text().await;
- let expected = "{\"status\":\"error\",\"data\":{\"resultType\":\"\",\"result\":[]},\"error\":\"invalid promql query\",\"errorType\":\"InvalidArguments\"}";
+ let expected = "{\"status\":\"error\",\"error\":\"invalid promql query\",\"errorType\":\"InvalidArguments\"}";
assert_eq!(expected, data);
// range_query with __name__ not-equal matcher
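
The change above swaps the old default of an empty `PromData` for an explicit `None` variant that is skipped during serialization, which is why the expected error body no longer carries a `data` field. A standalone sketch of that serde pattern, assuming the `serde` (with derive) and `serde_json` crates and hypothetical `Payload`/`Response` types rather than the real response structs:

use serde::{Deserialize, Serialize};

#[derive(Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(untagged)]
enum Payload {
    Data { result: Vec<String> },
    #[default]
    None,
}

impl Payload {
    fn is_none(&self) -> bool {
        matches!(self, Payload::None)
    }
}

#[derive(Debug, Serialize, Deserialize)]
struct Response {
    status: String,
    // Omit `data` entirely when it is `Payload::None`, and fall back to the
    // default (`None`) when deserializing a body without a `data` field.
    #[serde(skip_serializing_if = "Payload::is_none", default)]
    data: Payload,
    #[serde(skip_serializing_if = "Option::is_none")]
    error: Option<String>,
}

fn main() {
    // Error responses carry no `data` key at all.
    let err = Response {
        status: "error".to_string(),
        data: Payload::None,
        error: Some("invalid promql query".to_string()),
    };
    assert_eq!(
        serde_json::to_string(&err).unwrap(),
        r#"{"status":"error","error":"invalid promql query"}"#
    );

    // Successful responses still serialize the payload inline.
    let ok = Response {
        status: "success".to_string(),
        data: Payload::Data { result: vec!["m1".to_string()] },
        error: None,
    };
    assert_eq!(
        serde_json::to_string(&ok).unwrap(),
        r#"{"status":"success","data":{"result":["m1"]}}"#
    );
}
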
|
fix
|
skip data field on errors (#5520)
|
833216d3173073d3b90199a1decaadfdde320aa5
|
2022-12-07 09:07:59
|
LFC
|
refactor: directly invoke Datanode methods in standalone mode (part 1) (#694)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 33df1779b8e8..483297a9642e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1984,7 +1984,6 @@ dependencies = [
"datafusion",
"datafusion-common 7.0.0",
"datatypes",
- "frontend",
"futures",
"hyper",
"log-store",
diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs
index 7856c66e1676..c57cda3f9736 100644
--- a/src/cmd/src/error.rs
+++ b/src/cmd/src/error.rs
@@ -25,12 +25,6 @@ pub enum Error {
source: datanode::error::Error,
},
- #[snafu(display("Failed to build frontend, source: {}", source))]
- BuildFrontend {
- #[snafu(backtrace)]
- source: frontend::error::Error,
- },
-
#[snafu(display("Failed to start frontend, source: {}", source))]
StartFrontend {
#[snafu(backtrace)]
@@ -75,7 +69,6 @@ impl ErrorExt for Error {
StatusCode::InvalidArguments
}
Error::IllegalConfig { .. } => StatusCode::InvalidArguments,
- Error::BuildFrontend { source, .. } => source.status_code(),
}
}
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index 100f411d305d..e395d0912b0f 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -78,7 +78,7 @@ impl StartCommand {
let opts: FrontendOptions = self.try_into()?;
let mut frontend = Frontend::new(
opts.clone(),
- Instance::try_new(&opts)
+ Instance::try_new_distributed(&opts)
.await
.context(error::StartFrontendSnafu)?,
);
@@ -213,7 +213,6 @@ mod tests {
let fe_opts = FrontendOptions::try_from(command).unwrap();
assert_eq!(Mode::Distributed, fe_opts.mode);
- assert_eq!("127.0.0.1:3001".to_string(), fe_opts.datanode_rpc_addr);
assert_eq!(
"127.0.0.1:4000".to_string(),
fe_opts.http_options.as_ref().unwrap().addr
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index b3a86e3fb3eb..e72166b30359 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -28,11 +28,8 @@ use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::Mode;
use snafu::ResultExt;
-use tokio::try_join;
-use crate::error::{
- BuildFrontendSnafu, Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu,
-};
+use crate::error::{Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu};
use crate::toml_loader;
#[derive(Parser)]
@@ -104,7 +101,6 @@ impl StandaloneOptions {
influxdb_options: self.influxdb_options,
prometheus_options: self.prometheus_options,
mode: self.mode,
- datanode_rpc_addr: "127.0.0.1:3001".to_string(),
meta_client_opts: None,
}
}
@@ -162,7 +158,7 @@ impl StartCommand {
let mut datanode = Datanode::new(dn_opts.clone())
.await
.context(StartDatanodeSnafu)?;
- let mut frontend = build_frontend(fe_opts, &dn_opts, datanode.get_instance()).await?;
+ let mut frontend = build_frontend(fe_opts, datanode.get_instance()).await?;
// Start datanode instance before starting services, to avoid requests come in before internal components are started.
datanode
@@ -171,11 +167,7 @@ impl StartCommand {
.context(StartDatanodeSnafu)?;
info!("Datanode instance started");
- try_join!(
- async { datanode.start_services().await.context(StartDatanodeSnafu) },
- async { frontend.start().await.context(StartFrontendSnafu) }
- )?;
-
+ frontend.start().await.context(StartFrontendSnafu)?;
Ok(())
}
}
@@ -183,17 +175,9 @@ impl StartCommand {
/// Build frontend instance in standalone mode
async fn build_frontend(
fe_opts: FrontendOptions,
- dn_opts: &DatanodeOptions,
datanode_instance: InstanceRef,
) -> Result<Frontend<FeInstance>> {
- let grpc_server_addr = &dn_opts.rpc_addr;
- info!(
- "Build frontend with datanode gRPC addr: {}",
- grpc_server_addr
- );
- let mut frontend_instance = FeInstance::try_new(&fe_opts)
- .await
- .context(BuildFrontendSnafu)?;
+ let mut frontend_instance = FeInstance::new_standalone(datanode_instance.clone());
frontend_instance.set_catalog_manager(datanode_instance.catalog_manager().clone());
frontend_instance.set_script_handler(datanode_instance);
Ok(Frontend::new(fe_opts, frontend_instance))
@@ -289,7 +273,6 @@ mod tests {
let fe_opts = FrontendOptions::try_from(cmd).unwrap();
assert_eq!(Mode::Standalone, fe_opts.mode);
- assert_eq!("127.0.0.1:3001".to_string(), fe_opts.datanode_rpc_addr);
assert_eq!(
"127.0.0.1:4000".to_string(),
fe_opts.http_options.as_ref().unwrap().addr
diff --git a/src/common/grpc/src/select.rs b/src/common/grpc/src/select.rs
index 0801370dbd17..516f697d3bb1 100644
--- a/src/common/grpc/src/select.rs
+++ b/src/common/grpc/src/select.rs
@@ -23,7 +23,7 @@ use common_base::BitVec;
use common_error::prelude::ErrorExt;
use common_error::status_code::StatusCode;
use common_query::Output;
-use common_recordbatch::{util, RecordBatches, SendableRecordBatchStream};
+use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use datatypes::arrow::array::{Array, BooleanArray, PrimitiveArray};
use datatypes::arrow_array::{BinaryArray, StringArray};
use datatypes::schema::SchemaRef;
@@ -47,13 +47,9 @@ pub async fn to_object_result(output: std::result::Result<Output, impl ErrorExt>
}
}
async fn collect(stream: SendableRecordBatchStream) -> Result<ObjectResult> {
- let schema = stream.schema();
-
- let recordbatches = util::collect(stream)
+ let recordbatches = RecordBatches::try_collect(stream)
.await
- .and_then(|batches| RecordBatches::try_new(schema, batches))
.context(error::CollectRecordBatchesSnafu)?;
-
let object_result = build_result(recordbatches)?;
Ok(object_result)
}
diff --git a/src/common/recordbatch/src/lib.rs b/src/common/recordbatch/src/lib.rs
index ce2c2f1e5a90..2809040326ff 100644
--- a/src/common/recordbatch/src/lib.rs
+++ b/src/common/recordbatch/src/lib.rs
@@ -27,7 +27,7 @@ use datatypes::prelude::VectorRef;
use datatypes::schema::{Schema, SchemaRef};
use error::Result;
use futures::task::{Context, Poll};
-use futures::Stream;
+use futures::{Stream, TryStreamExt};
pub use recordbatch::RecordBatch;
use snafu::ensure;
@@ -80,6 +80,12 @@ impl RecordBatches {
Ok(Self { schema, batches })
}
+ pub async fn try_collect(stream: SendableRecordBatchStream) -> Result<Self> {
+ let schema = stream.schema();
+ let batches = stream.try_collect::<Vec<_>>().await?;
+ Ok(Self { schema, batches })
+ }
+
#[inline]
pub fn empty() -> Self {
Self {
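
`RecordBatches::try_collect` above is essentially `futures::TryStreamExt::try_collect` plus capturing the schema before the stream is drained. A minimal illustration of the combinator on its own, assuming only the `futures` crate and plain `String` errors instead of the recordbatch error type:

use futures::{executor::block_on, stream, TryStreamExt};

fn main() {
    // A fallible stream of "batches": two Ok items and nothing else.
    let s = stream::iter(vec![Ok::<_, String>(vec![1, 2]), Ok(vec![3])]);

    // `try_collect` gathers all items, or returns the first error it meets.
    let collected: Result<Vec<Vec<i32>>, String> = block_on(s.try_collect());
    assert_eq!(collected.unwrap(), vec![vec![1, 2], vec![3]]);

    // With an error in the middle, the error short-circuits the collection.
    let s = stream::iter(vec![Ok(vec![1]), Err("boom".to_string()), Ok(vec![2])]);
    let collected: Result<Vec<Vec<i32>>, String> = block_on(s.try_collect());
    assert_eq!(collected.unwrap_err(), "boom");
}
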
diff --git a/src/datanode/Cargo.toml b/src/datanode/Cargo.toml
index 159ec0ef4461..47f34d218625 100644
--- a/src/datanode/Cargo.toml
+++ b/src/datanode/Cargo.toml
@@ -29,7 +29,6 @@ datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch =
"simd",
] }
datatypes = { path = "../datatypes" }
-frontend = { path = "../frontend" }
futures = "0.3"
hyper = { version = "0.14", features = ["full"] }
log-store = { path = "../log-store" }
diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml
index 5e3eee4b946c..452131d0d71e 100644
--- a/src/frontend/Cargo.toml
+++ b/src/frontend/Cargo.toml
@@ -26,7 +26,7 @@ datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch =
] }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2" }
-
+datanode = { path = "../datanode" }
datatypes = { path = "../datatypes" }
futures = "0.3"
futures-util = "0.3"
diff --git a/src/frontend/src/catalog.rs b/src/frontend/src/catalog.rs
index aea667367f6b..0c66980334ea 100644
--- a/src/frontend/src/catalog.rs
+++ b/src/frontend/src/catalog.rs
@@ -59,6 +59,16 @@ impl FrontendCatalogManager {
pub(crate) fn backend(&self) -> KvBackendRef {
self.backend.clone()
}
+
+ #[cfg(test)]
+ pub(crate) fn table_routes(&self) -> Arc<TableRoutes> {
+ self.table_routes.clone()
+ }
+
+ #[cfg(test)]
+ pub(crate) fn datanode_clients(&self) -> Arc<DatanodeClients> {
+ self.datanode_clients.clone()
+ }
}
// FIXME(hl): Frontend only needs a CatalogList, should replace with trait upcasting
diff --git a/src/frontend/src/error.rs b/src/frontend/src/error.rs
index 823ce693ceed..eae56a12f837 100644
--- a/src/frontend/src/error.rs
+++ b/src/frontend/src/error.rs
@@ -244,18 +244,6 @@ pub enum Error {
source: client::Error,
},
- #[snafu(display("Failed to alter table, source: {}", source))]
- AlterTable {
- #[snafu(backtrace)]
- source: client::Error,
- },
-
- #[snafu(display("Failed to drop table, source: {}", source))]
- DropTable {
- #[snafu(backtrace)]
- source: client::Error,
- },
-
#[snafu(display("Failed to insert values to table, source: {}", source))]
Insert {
#[snafu(backtrace)]
@@ -398,9 +386,6 @@ pub enum Error {
source: query::error::Error,
},
- #[snafu(display("Unsupported expr type: {}", name))]
- UnsupportedExpr { name: String, backtrace: Backtrace },
-
#[snafu(display("Failed to do vector computation, source: {}", source))]
VectorComputation {
#[snafu(backtrace)]
@@ -451,6 +436,12 @@ pub enum Error {
#[snafu(backtrace)]
source: substrait::error::Error,
},
+
+ #[snafu(display("Failed to invoke GRPC server, source: {}", source))]
+ InvokeGrpcServer {
+ #[snafu(backtrace)]
+ source: servers::error::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -470,7 +461,9 @@ impl ErrorExt for Error {
Error::RuntimeResource { source, .. } => source.status_code(),
- Error::StartServer { source, .. } => source.status_code(),
+ Error::StartServer { source, .. } | Error::InvokeGrpcServer { source } => {
+ source.status_code()
+ }
Error::ParseSql { source } => source.status_code(),
@@ -500,7 +493,6 @@ impl ErrorExt for Error {
| Error::FindLeaderPeer { .. }
| Error::FindRegionPartition { .. }
| Error::IllegalTableRoutesData { .. }
- | Error::UnsupportedExpr { .. }
| Error::BuildDfLogicalPlan { .. } => StatusCode::Internal,
Error::IllegalFrontendState { .. } | Error::IncompleteGrpcResult { .. } => {
@@ -522,8 +514,6 @@ impl ErrorExt for Error {
Error::SchemaNotFound { .. } => StatusCode::InvalidArguments,
Error::CatalogNotFound { .. } => StatusCode::InvalidArguments,
Error::CreateTable { source, .. }
- | Error::AlterTable { source, .. }
- | Error::DropTable { source }
| Error::Select { source, .. }
| Error::CreateDatabase { source, .. }
| Error::CreateTableOnInsertion { source, .. }
diff --git a/src/frontend/src/frontend.rs b/src/frontend/src/frontend.rs
index 521ed6c83410..92f8fa3b0d63 100644
--- a/src/frontend/src/frontend.rs
+++ b/src/frontend/src/frontend.rs
@@ -40,7 +40,6 @@ pub struct FrontendOptions {
pub influxdb_options: Option<InfluxdbOptions>,
pub prometheus_options: Option<PrometheusOptions>,
pub mode: Mode,
- pub datanode_rpc_addr: String,
pub meta_client_opts: Option<MetaClientOpts>,
}
@@ -55,18 +54,11 @@ impl Default for FrontendOptions {
influxdb_options: Some(InfluxdbOptions::default()),
prometheus_options: Some(PrometheusOptions::default()),
mode: Mode::Standalone,
- datanode_rpc_addr: "127.0.0.1:3001".to_string(),
meta_client_opts: None,
}
}
}
-impl FrontendOptions {
- pub(crate) fn datanode_grpc_addr(&self) -> String {
- self.datanode_rpc_addr.clone()
- }
-}
-
pub struct Frontend<T>
where
T: FrontendInstance,
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index b1c04389a7ae..161682515753 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -20,50 +20,49 @@ mod prometheus;
use std::sync::Arc;
use std::time::Duration;
-use api::result::ObjectResultBuilder;
+use api::result::{ObjectResultBuilder, PROTOCOL_VERSION};
use api::v1::alter_expr::Kind;
use api::v1::object_expr::Expr;
use api::v1::{
- admin_expr, select_expr, AddColumns, AdminExpr, AdminResult, AlterExpr, Column,
- CreateDatabaseExpr, CreateExpr, DropTableExpr, InsertExpr, ObjectExpr,
+ admin_expr, AddColumns, AdminExpr, AdminResult, AlterExpr, Column, CreateDatabaseExpr,
+ CreateExpr, DropTableExpr, ExprHeader, InsertExpr, ObjectExpr,
ObjectResult as GrpcObjectResult,
};
use async_trait::async_trait;
use catalog::remote::MetaKvBackend;
use catalog::{CatalogManagerRef, CatalogProviderRef, SchemaProviderRef};
-use client::admin::{admin_result_to_output, Admin};
-use client::{Client, Database, Select};
-use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
-use common_error::prelude::{BoxedError, StatusCode};
+use client::admin::admin_result_to_output;
+use client::ObjectResult;
+use common_catalog::consts::DEFAULT_CATALOG_NAME;
+use common_error::prelude::BoxedError;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
-use common_grpc::select::to_object_result;
use common_query::Output;
use common_recordbatch::RecordBatches;
-use common_telemetry::{debug, error, info};
+use common_telemetry::{debug, info};
+use datanode::instance::InstanceRef as DnInstanceRef;
use distributed::DistInstance;
-use meta_client::client::MetaClientBuilder;
+use meta_client::client::{MetaClient, MetaClientBuilder};
use meta_client::MetaClientOpts;
use servers::query_handler::{
- GrpcAdminHandler, GrpcQueryHandler, InfluxdbLineProtocolHandler, OpentsdbProtocolHandler,
- PrometheusProtocolHandler, ScriptHandler, ScriptHandlerRef, SqlQueryHandler,
+ GrpcAdminHandler, GrpcAdminHandlerRef, GrpcQueryHandler, GrpcQueryHandlerRef,
+ InfluxdbLineProtocolHandler, OpentsdbProtocolHandler, PrometheusProtocolHandler, ScriptHandler,
+ ScriptHandlerRef, SqlQueryHandler, SqlQueryHandlerRef,
};
use servers::{error as server_error, Mode};
-use session::context::{QueryContext, QueryContextRef};
+use session::context::QueryContextRef;
use snafu::prelude::*;
use sql::dialect::GenericDialect;
use sql::parser::ParserContext;
use sql::statements::create::Partitions;
-use sql::statements::explain::Explain;
use sql::statements::insert::Insert;
use sql::statements::statement::Statement;
use crate::catalog::FrontendCatalogManager;
use crate::datanode::DatanodeClients;
use crate::error::{
- self, AlterTableOnInsertionSnafu, AlterTableSnafu, CatalogNotFoundSnafu, CatalogSnafu,
- CreateDatabaseSnafu, CreateTableSnafu, DropTableSnafu, FindNewColumnsOnInsertionSnafu,
- InsertSnafu, MissingMetasrvOptsSnafu, Result, SchemaNotFoundSnafu, SelectSnafu,
- UnsupportedExprSnafu,
+ self, AlterTableOnInsertionSnafu, CatalogNotFoundSnafu, CatalogSnafu, CreateDatabaseSnafu,
+ CreateTableSnafu, FindNewColumnsOnInsertionSnafu, InsertSnafu, MissingMetasrvOptsSnafu, Result,
+ SchemaNotFoundSnafu,
};
use crate::expr_factory::{CreateExprFactoryRef, DefaultCreateExprFactory};
use crate::frontend::FrontendOptions;
@@ -91,9 +90,6 @@ pub type FrontendInstanceRef = Arc<dyn FrontendInstance>;
#[derive(Clone)]
pub struct Instance {
- // TODO(hl): In standalone mode, there is only one client.
- // But in distribute mode, frontend should fetch datanodes' addresses from metasrv.
- client: Client,
/// catalog manager is None in standalone mode, datanode will keep their own
catalog_manager: Option<CatalogManagerRef>,
/// Script handler is None in distributed mode, only works on standalone mode.
@@ -103,94 +99,87 @@ pub struct Instance {
// Standalone and Distributed, then the code behind it doesn't need to use so
// many match statements.
mode: Mode,
- // TODO(LFC): Refactor consideration: Can we split Frontend to DistInstance and EmbedInstance?
+
+ // TODO(LFC): Remove `dist_instance` together with Arrow Flight adoption refactor.
dist_instance: Option<DistInstance>,
-}
-impl Default for Instance {
- fn default() -> Self {
- Self {
- client: Client::default(),
- catalog_manager: None,
- script_handler: None,
- create_expr_factory: Arc::new(DefaultCreateExprFactory {}),
- mode: Mode::Standalone,
- dist_instance: None,
- }
- }
+ sql_handler: SqlQueryHandlerRef,
+ grpc_query_handler: GrpcQueryHandlerRef,
+ grpc_admin_handler: GrpcAdminHandlerRef,
}
impl Instance {
- pub async fn try_new(opts: &FrontendOptions) -> Result<Self> {
- let mut instance = Instance {
- mode: opts.mode.clone(),
- ..Default::default()
- };
-
- let addr = opts.datanode_grpc_addr();
- instance.client.start(vec![addr]);
-
- instance.dist_instance = match &opts.mode {
- Mode::Standalone => None,
- Mode::Distributed => {
- let metasrv_addr = &opts
- .meta_client_opts
- .as_ref()
- .context(MissingMetasrvOptsSnafu)?
- .metasrv_addrs;
- info!(
- "Creating Frontend instance in distributed mode with Meta server addr {:?}",
- metasrv_addr
- );
-
- let meta_config = MetaClientOpts::default();
- let channel_config = ChannelConfig::new()
- .timeout(Duration::from_millis(meta_config.timeout_millis))
- .connect_timeout(Duration::from_millis(meta_config.connect_timeout_millis))
- .tcp_nodelay(meta_config.tcp_nodelay);
-
- let channel_manager = ChannelManager::with_config(channel_config);
-
- let mut meta_client = MetaClientBuilder::new(0, 0)
- .enable_router()
- .enable_store()
- .channel_manager(channel_manager)
- .build();
- meta_client
- .start(metasrv_addr)
- .await
- .context(error::StartMetaClientSnafu)?;
- let meta_client = Arc::new(meta_client);
-
- let meta_backend = Arc::new(MetaKvBackend {
- client: meta_client.clone(),
- });
- let table_routes = Arc::new(TableRoutes::new(meta_client.clone()));
- let datanode_clients = Arc::new(DatanodeClients::new());
- let catalog_manager = Arc::new(FrontendCatalogManager::new(
- meta_backend,
- table_routes,
- datanode_clients.clone(),
- ));
-
- instance.catalog_manager = Some(catalog_manager.clone());
-
- Some(DistInstance::new(
- meta_client,
- catalog_manager,
- datanode_clients,
- ))
- }
- };
- Ok(instance)
+ pub async fn try_new_distributed(opts: &FrontendOptions) -> Result<Self> {
+ let meta_client = Self::create_meta_client(opts).await?;
+
+ let meta_backend = Arc::new(MetaKvBackend {
+ client: meta_client.clone(),
+ });
+ let table_routes = Arc::new(TableRoutes::new(meta_client.clone()));
+ let datanode_clients = Arc::new(DatanodeClients::new());
+ let catalog_manager = Arc::new(FrontendCatalogManager::new(
+ meta_backend,
+ table_routes,
+ datanode_clients.clone(),
+ ));
+
+ let dist_instance =
+ DistInstance::new(meta_client, catalog_manager.clone(), datanode_clients);
+ let dist_instance_ref = Arc::new(dist_instance.clone());
+
+ Ok(Instance {
+ catalog_manager: Some(catalog_manager),
+ script_handler: None,
+ create_expr_factory: Arc::new(DefaultCreateExprFactory),
+ mode: Mode::Distributed,
+ dist_instance: Some(dist_instance),
+ sql_handler: dist_instance_ref.clone(),
+ grpc_query_handler: dist_instance_ref.clone(),
+ grpc_admin_handler: dist_instance_ref,
+ })
}
- pub fn database(&self, database: &str) -> Database {
- Database::new(database, self.client.clone())
+ async fn create_meta_client(opts: &FrontendOptions) -> Result<Arc<MetaClient>> {
+ let metasrv_addr = &opts
+ .meta_client_opts
+ .as_ref()
+ .context(MissingMetasrvOptsSnafu)?
+ .metasrv_addrs;
+ info!(
+ "Creating Frontend instance in distributed mode with Meta server addr {:?}",
+ metasrv_addr
+ );
+
+ let meta_config = MetaClientOpts::default();
+ let channel_config = ChannelConfig::new()
+ .timeout(Duration::from_millis(meta_config.timeout_millis))
+ .connect_timeout(Duration::from_millis(meta_config.connect_timeout_millis))
+ .tcp_nodelay(meta_config.tcp_nodelay);
+ let channel_manager = ChannelManager::with_config(channel_config);
+
+ let mut meta_client = MetaClientBuilder::new(0, 0)
+ .enable_router()
+ .enable_store()
+ .channel_manager(channel_manager)
+ .build();
+ meta_client
+ .start(metasrv_addr)
+ .await
+ .context(error::StartMetaClientSnafu)?;
+ Ok(Arc::new(meta_client))
}
- pub fn admin(&self, database: &str) -> Admin {
- Admin::new(database, self.client.clone())
+ pub fn new_standalone(dn_instance: DnInstanceRef) -> Self {
+ Instance {
+ catalog_manager: None,
+ script_handler: None,
+ create_expr_factory: Arc::new(DefaultCreateExprFactory),
+ mode: Mode::Standalone,
+ dist_instance: None,
+ sql_handler: dn_instance.clone(),
+ grpc_query_handler: dn_instance.clone(),
+ grpc_admin_handler: dn_instance,
+ }
}
pub fn catalog_manager(&self) -> &Option<CatalogManagerRef> {
@@ -213,27 +202,6 @@ impl Instance {
self.script_handler = Some(handler);
}
- async fn handle_select(
- &self,
- expr: Select,
- stmt: Statement,
- query_ctx: QueryContextRef,
- ) -> Result<Output> {
- if let Some(dist_instance) = &self.dist_instance {
- let Select::Sql(sql) = expr;
- dist_instance.handle_sql(&sql, stmt, query_ctx).await
- } else {
- // TODO(LFC): Refactor consideration: Datanode should directly execute statement in standalone mode to avoid parse SQL again.
- // Find a better way to execute query between Frontend and Datanode in standalone mode.
- // Otherwise we have to parse SQL first to get schema name. Maybe not GRPC.
- self.database(DEFAULT_SCHEMA_NAME)
- .select(expr)
- .await
- .and_then(Output::try_from)
- .context(SelectSnafu)
- }
- }
-
/// Handle create expr.
pub async fn handle_create_table(
&self,
@@ -243,81 +211,38 @@ impl Instance {
if let Some(v) = &self.dist_instance {
v.create_table(&mut expr, partitions).await
} else {
- // Currently standalone mode does not support multi partitions/regions.
+ let expr = AdminExpr {
+ header: Some(ExprHeader {
+ version: PROTOCOL_VERSION,
+ }),
+ expr: Some(admin_expr::Expr::Create(expr)),
+ };
let result = self
- .admin(expr.schema_name.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME))
- .create(expr.clone())
- .await;
- if let Err(e) = &result {
- error!(e; "Failed to create table by expr: {:?}", expr);
- }
- result
- .and_then(admin_result_to_output)
- .context(CreateTableSnafu)
+ .grpc_admin_handler
+ .exec_admin_request(expr)
+ .await
+ .context(error::InvokeGrpcServerSnafu)?;
+ admin_result_to_output(result).context(CreateTableSnafu)
}
}
/// Handle create database expr.
pub async fn handle_create_database(&self, expr: CreateDatabaseExpr) -> Result<Output> {
let database_name = expr.database_name.clone();
- if let Some(dist_instance) = &self.dist_instance {
- dist_instance.handle_create_database(expr).await
- } else {
- // FIXME(hl): In order to get admin client to create schema, we need to use the default schema admin
- self.admin(DEFAULT_SCHEMA_NAME)
- .create_database(expr)
- .await
- .and_then(admin_result_to_output)
- .context(CreateDatabaseSnafu {
- name: database_name,
- })
- }
- }
-
- /// Handle alter expr
- pub async fn handle_alter(&self, expr: AlterExpr) -> Result<Output> {
- match &self.dist_instance {
- Some(dist_instance) => dist_instance.handle_alter_table(expr).await,
- None => self
- .admin(expr.schema_name.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME))
- .alter(expr)
- .await
- .and_then(admin_result_to_output)
- .context(AlterTableSnafu),
- }
- }
-
- /// Handle drop table expr
- pub async fn handle_drop_table(&self, expr: DropTableExpr) -> Result<Output> {
- match self.mode {
- Mode::Standalone => self
- .admin(&expr.schema_name)
- .drop_table(expr)
- .await
- .and_then(admin_result_to_output)
- .context(DropTableSnafu),
- // TODO(ruihang): support drop table in distributed mode
- Mode::Distributed => UnsupportedExprSnafu {
- name: "Distributed DROP TABLE",
- }
- .fail(),
- }
- }
-
- /// Handle explain expr
- pub async fn handle_explain(
- &self,
- sql: &str,
- explain_stmt: Explain,
- query_ctx: QueryContextRef,
- ) -> Result<Output> {
- if let Some(dist_instance) = &self.dist_instance {
- dist_instance
- .handle_sql(sql, Statement::Explain(explain_stmt), query_ctx)
- .await
- } else {
- Ok(Output::AffectedRows(0))
- }
+ let expr = AdminExpr {
+ header: Some(ExprHeader {
+ version: PROTOCOL_VERSION,
+ }),
+ expr: Some(admin_expr::Expr::CreateDatabase(expr)),
+ };
+ let result = self
+ .grpc_admin_handler
+ .exec_admin_request(expr)
+ .await
+ .context(error::InvokeGrpcServerSnafu)?;
+ admin_result_to_output(result).context(CreateDatabaseSnafu {
+ name: database_name,
+ })
}
/// Handle batch inserts
@@ -333,7 +258,7 @@ impl Instance {
}
/// Handle insert. for 'values' insertion, create/alter the destination table on demand.
- pub async fn handle_insert(&self, mut insert_expr: InsertExpr) -> Result<Output> {
+ async fn handle_insert(&self, mut insert_expr: InsertExpr) -> Result<Output> {
let table_name = &insert_expr.table_name;
let catalog_name = DEFAULT_CATALOG_NAME;
let schema_name = &insert_expr.schema_name;
@@ -345,11 +270,17 @@ impl Instance {
insert_expr.region_number = 0;
- self.database(schema_name)
- .insert(insert_expr)
+ let query = ObjectExpr {
+ header: Some(ExprHeader {
+ version: PROTOCOL_VERSION,
+ }),
+ expr: Some(Expr::Insert(insert_expr)),
+ };
+ let result = GrpcQueryHandler::do_query(&*self.grpc_query_handler, query)
.await
- .and_then(Output::try_from)
- .context(InsertSnafu)
+ .context(error::InvokeGrpcServerSnafu)?;
+ let result: ObjectResult = result.try_into().context(InsertSnafu)?;
+ result.try_into().context(InsertSnafu)
}
// check if table already exist:
@@ -455,11 +386,19 @@ impl Instance {
catalog_name: Some(catalog_name.to_string()),
kind: Some(Kind::AddColumns(add_columns)),
};
- self.admin(schema_name)
- .alter(expr)
+
+ let expr = AdminExpr {
+ header: Some(ExprHeader {
+ version: PROTOCOL_VERSION,
+ }),
+ expr: Some(admin_expr::Expr::Alter(expr)),
+ };
+ let result = self
+ .grpc_admin_handler
+ .exec_admin_request(expr)
.await
- .and_then(admin_result_to_output)
- .context(AlterTableOnInsertionSnafu)
+ .context(error::InvokeGrpcServerSnafu)?;
+ admin_result_to_output(result).context(AlterTableOnInsertionSnafu)
}
fn get_catalog(&self, catalog_name: &str) -> Result<CatalogProviderRef> {
@@ -547,20 +486,6 @@ impl FrontendInstance for Instance {
}
}
-#[cfg(test)]
-impl Instance {
- pub fn with_client_and_catalog_manager(client: Client, catalog: CatalogManagerRef) -> Self {
- Self {
- client,
- catalog_manager: Some(catalog),
- script_handler: None,
- create_expr_factory: Arc::new(DefaultCreateExprFactory),
- mode: Mode::Standalone,
- dist_instance: None,
- }
- }
-}
-
fn parse_stmt(sql: &str) -> Result<Statement> {
let mut stmt = ParserContext::create_with_dialect(sql, &GenericDialect {})
.context(error::ParseSqlSnafu)?;
@@ -587,12 +512,14 @@ impl SqlQueryHandler for Instance {
.context(server_error::ExecuteQuerySnafu { query })?;
match stmt {
- Statement::ShowDatabases(_)
+ Statement::CreateDatabase(_)
+ | Statement::ShowDatabases(_)
+ | Statement::CreateTable(_)
| Statement::ShowTables(_)
| Statement::DescribeTable(_)
+ | Statement::Explain(_)
| Statement::Query(_) => {
- self.handle_select(Select::Sql(query.to_string()), stmt, query_ctx)
- .await
+ return self.sql_handler.do_query(query, query_ctx).await;
}
Statement::Insert(insert) => match self.mode {
Mode::Standalone => {
@@ -629,30 +556,18 @@ impl SqlQueryHandler for Instance {
Ok(Output::AffectedRows(affected))
}
},
- Statement::CreateTable(create) => {
- let create_expr = self
- .create_expr_factory
- .create_expr_by_stmt(&create)
- .await
+ Statement::Alter(alter_stmt) => {
+ let expr = AlterExpr::try_from(alter_stmt)
.map_err(BoxedError::new)
- .context(server_error::ExecuteQuerySnafu { query })?;
-
- self.handle_create_table(create_expr, create.partitions)
- .await
- }
- Statement::CreateDatabase(c) => {
- let expr = CreateDatabaseExpr {
- database_name: c.name.to_string(),
+ .context(server_error::ExecuteAlterSnafu { query })?;
+ let expr = AdminExpr {
+ header: Some(ExprHeader {
+ version: PROTOCOL_VERSION,
+ }),
+ expr: Some(admin_expr::Expr::Alter(expr)),
};
- self.handle_create_database(expr).await
- }
- Statement::Alter(alter_stmt) => {
- self.handle_alter(
- AlterExpr::try_from(alter_stmt)
- .map_err(BoxedError::new)
- .context(server_error::ExecuteAlterSnafu { query })?,
- )
- .await
+ let result = self.grpc_admin_handler.exec_admin_request(expr).await?;
+ admin_result_to_output(result).context(error::InvalidAdminResultSnafu)
}
Statement::DropTable(drop_stmt) => {
let expr = DropTableExpr {
@@ -660,10 +575,14 @@ impl SqlQueryHandler for Instance {
schema_name: drop_stmt.schema_name,
table_name: drop_stmt.table_name,
};
- self.handle_drop_table(expr).await
- }
- Statement::Explain(explain_stmt) => {
- self.handle_explain(query, explain_stmt, query_ctx).await
+ let expr = AdminExpr {
+ header: Some(ExprHeader {
+ version: PROTOCOL_VERSION,
+ }),
+ expr: Some(admin_expr::Expr::DropTable(expr)),
+ };
+ let result = self.grpc_admin_handler.exec_admin_request(expr).await?;
+ admin_result_to_output(result).context(error::InvalidAdminResultSnafu)
}
Statement::ShowCreateTable(_) => {
return server_error::NotSupportedSnafu { feat: query }.fail();
@@ -703,81 +622,34 @@ impl ScriptHandler for Instance {
#[async_trait]
impl GrpcQueryHandler for Instance {
async fn do_query(&self, query: ObjectExpr) -> server_error::Result<GrpcObjectResult> {
- if let Some(expr) = &query.expr {
- match expr {
- Expr::Insert(insert) => {
- // TODO(fys): refactor, avoid clone
- let result = self.handle_insert(insert.clone()).await;
- result
- .map(|o| match o {
- Output::AffectedRows(rows) => ObjectResultBuilder::new()
- .status_code(StatusCode::Success as u32)
- .mutate_result(rows as u32, 0u32)
- .build(),
- _ => {
- unreachable!()
- }
- })
- .map_err(BoxedError::new)
- .with_context(|_| server_error::ExecuteQuerySnafu {
- query: format!("{:?}", query),
- })
- }
- Expr::Select(select) => {
- let select = select
- .expr
- .as_ref()
- .context(server_error::InvalidQuerySnafu {
- reason: "empty query",
- })?;
- match select {
- select_expr::Expr::Sql(sql) => {
- let query_ctx = Arc::new(QueryContext::new());
- let output = SqlQueryHandler::do_query(self, sql, query_ctx).await;
- Ok(to_object_result(output).await)
- }
- _ => {
- if self.dist_instance.is_some() {
- return server_error::NotSupportedSnafu {
- feat: "Executing plan directly in Frontend.",
- }
- .fail();
- }
- // FIXME(hl): refactor
- self.database(DEFAULT_SCHEMA_NAME)
- .object(query.clone())
- .await
- .map_err(BoxedError::new)
- .with_context(|_| server_error::ExecuteQuerySnafu {
- query: format!("{:?}", query),
- })
- }
- }
- }
- _ => server_error::NotSupportedSnafu {
- feat: "Currently only insert and select is supported in GRPC service.",
- }
- .fail(),
- }
- } else {
- server_error::InvalidQuerySnafu {
- reason: "empty query",
+ let expr = query
+ .clone()
+ .expr
+ .context(server_error::InvalidQuerySnafu {
+ reason: "empty expr",
+ })?;
+ match expr {
+ Expr::Insert(insert_expr) => {
+ let output = self
+ .handle_insert(insert_expr.clone())
+ .await
+ .map_err(BoxedError::new)
+ .with_context(|_| server_error::ExecuteQuerySnafu {
+ query: format!("{:?}", insert_expr),
+ })?;
+ let object_result = match output {
+ Output::AffectedRows(rows) => ObjectResultBuilder::default()
+ .mutate_result(rows as _, 0)
+ .build(),
+ _ => unreachable!(),
+ };
+ Ok(object_result)
}
- .fail()
+ _ => GrpcQueryHandler::do_query(&*self.grpc_query_handler, query).await,
}
}
}
-fn get_schema_name(expr: &AdminExpr) -> &str {
- let schema_name = match &expr.expr {
- Some(admin_expr::Expr::Create(expr)) => expr.schema_name.as_deref(),
- Some(admin_expr::Expr::Alter(expr)) => expr.schema_name.as_deref(),
- Some(admin_expr::Expr::CreateDatabase(_)) | None => Some(DEFAULT_SCHEMA_NAME),
- Some(admin_expr::Expr::DropTable(expr)) => Some(expr.schema_name.as_ref()),
- };
- schema_name.unwrap_or(DEFAULT_SCHEMA_NAME)
-}
-
#[async_trait]
impl GrpcAdminHandler for Instance {
async fn exec_admin_request(&self, mut expr: AdminExpr) -> server_error::Result<AdminResult> {
@@ -786,13 +658,7 @@ impl GrpcAdminHandler for Instance {
if let Some(api::v1::admin_expr::Expr::Create(create)) = &mut expr.expr {
create.table_id = None;
}
- self.admin(get_schema_name(&expr))
- .do_request(expr.clone())
- .await
- .map_err(BoxedError::new)
- .with_context(|_| server_error::ExecuteQuerySnafu {
- query: format!("{:?}", expr),
- })
+ self.grpc_admin_handler.exec_admin_request(expr).await
}
}
@@ -808,6 +674,7 @@ mod tests {
};
use datatypes::schema::ColumnDefaultConstraint;
use datatypes::value::Value;
+ use session::context::QueryContext;
use super::*;
use crate::tests;
@@ -853,7 +720,8 @@ mod tests {
.await
.unwrap();
match output {
- Output::RecordBatches(recordbatches) => {
+ Output::Stream(stream) => {
+ let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
let pretty_print = recordbatches.pretty_print();
let pretty_print = pretty_print.lines().collect::<Vec<&str>>();
let expected = vec![
@@ -875,7 +743,8 @@ mod tests {
.await
.unwrap();
match output {
- Output::RecordBatches(recordbatches) => {
+ Output::Stream(stream) => {
+ let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
let pretty_print = recordbatches.pretty_print();
let pretty_print = pretty_print.lines().collect::<Vec<&str>>();
let expected = vec![
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index d32e12ee24d4..2613654f8fbd 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -16,12 +16,18 @@ use std::collections::HashMap;
use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
-use api::v1::{AlterExpr, CreateDatabaseExpr, CreateExpr};
+use api::result::AdminResultBuilder;
+use api::v1::{
+ admin_expr, AdminExpr, AdminResult, AlterExpr, CreateDatabaseExpr, CreateExpr, ObjectExpr,
+ ObjectResult,
+};
+use async_trait::async_trait;
use catalog::helper::{SchemaKey, SchemaValue, TableGlobalKey, TableGlobalValue};
use catalog::CatalogList;
use chrono::DateTime;
use client::admin::{admin_result_to_output, Admin};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use common_error::prelude::BoxedError;
use common_query::Output;
use common_telemetry::{debug, error, info};
use datatypes::prelude::ConcreteDataType;
@@ -33,6 +39,8 @@ use meta_client::rpc::{
};
use query::sql::{describe_table, explain, show_databases, show_tables};
use query::{QueryEngineFactory, QueryEngineRef};
+use servers::error as server_error;
+use servers::query_handler::{GrpcAdminHandler, GrpcQueryHandler, SqlQueryHandler};
use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
use sql::statements::create::Partitions;
@@ -48,6 +56,8 @@ use crate::error::{
PrimaryKeyNotFoundSnafu, RequestMetaSnafu, Result, SchemaNotFoundSnafu, StartMetaClientSnafu,
TableNotFoundSnafu,
};
+use crate::expr_factory::{CreateExprFactory, DefaultCreateExprFactory};
+use crate::instance::parse_stmt;
use crate::partitioning::{PartitionBound, PartitionDef};
use crate::table::DistTable;
@@ -126,15 +136,12 @@ impl DistInstance {
.context(error::InvalidAdminResultSnafu)?;
}
- Ok(Output::AffectedRows(region_routes.len()))
+        // Checked against real MySQL: it truly returns "0 rows affected".
+ Ok(Output::AffectedRows(0))
}
- pub(crate) async fn handle_sql(
- &self,
- sql: &str,
- stmt: Statement,
- query_ctx: QueryContextRef,
- ) -> Result<Output> {
+ async fn handle_sql(&self, sql: &str, query_ctx: QueryContextRef) -> Result<Output> {
+ let stmt = parse_stmt(sql)?;
match stmt {
Statement::Query(_) => {
let plan = self
@@ -143,6 +150,17 @@ impl DistInstance {
.context(error::ExecuteSqlSnafu { sql })?;
self.query_engine.execute(&plan).await
}
+ Statement::CreateDatabase(stmt) => {
+ let expr = CreateDatabaseExpr {
+ database_name: stmt.name.to_string(),
+ };
+ self.handle_create_database(expr).await?;
+ Ok(Output::AffectedRows(1))
+ }
+ Statement::CreateTable(stmt) => {
+ let create_expr = &mut DefaultCreateExprFactory.create_expr_by_stmt(&stmt).await?;
+ Ok(self.create_table(create_expr, stmt.partitions).await?)
+ }
Statement::ShowDatabases(stmt) => show_databases(stmt, self.catalog_manager.clone()),
Statement::ShowTables(stmt) => {
show_tables(stmt, self.catalog_manager.clone(), query_ctx)
@@ -157,7 +175,7 @@ impl DistInstance {
}
/// Handles distributed database creation
- pub(crate) async fn handle_create_database(&self, expr: CreateDatabaseExpr) -> Result<Output> {
+ async fn handle_create_database(&self, expr: CreateDatabaseExpr) -> Result<()> {
let key = SchemaKey {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: expr.database_name,
@@ -172,10 +190,10 @@ impl DistInstance {
.with_key(key.to_string())
.with_value(value.as_bytes().context(CatalogEntrySerdeSnafu)?);
client.put(request.into()).await.context(RequestMetaSnafu)?;
- Ok(Output::AffectedRows(1))
+ Ok(())
}
- pub async fn handle_alter_table(&self, expr: AlterExpr) -> Result<Output> {
+ async fn handle_alter_table(&self, expr: AlterExpr) -> Result<AdminResult> {
let catalog_name = expr.catalog_name.as_deref().unwrap_or(DEFAULT_CATALOG_NAME);
let schema_name = expr.schema_name.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME);
let table_name = expr.table_name.as_str();
@@ -200,7 +218,7 @@ impl DistInstance {
.downcast_ref::<DistTable>()
.expect("Table impl must be DistTable in distributed mode");
dist_table.alter_by_expr(expr).await?;
- Ok(Output::AffectedRows(0))
+ Ok(AdminResultBuilder::default().mutate_result(0, 0).build())
}
async fn create_table_in_meta(
@@ -269,6 +287,56 @@ impl DistInstance {
}
Ok(())
}
+
+ #[cfg(test)]
+ pub(crate) fn catalog_manager(&self) -> Arc<FrontendCatalogManager> {
+ self.catalog_manager.clone()
+ }
+}
+
+#[async_trait]
+impl SqlQueryHandler for DistInstance {
+ async fn do_query(
+ &self,
+ query: &str,
+ query_ctx: QueryContextRef,
+ ) -> server_error::Result<Output> {
+ self.handle_sql(query, query_ctx)
+ .await
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteQuerySnafu { query })
+ }
+}
+
+#[async_trait]
+impl GrpcQueryHandler for DistInstance {
+ async fn do_query(&self, _: ObjectExpr) -> server_error::Result<ObjectResult> {
+ unimplemented!()
+ }
+}
+
+#[async_trait]
+impl GrpcAdminHandler for DistInstance {
+ async fn exec_admin_request(&self, query: AdminExpr) -> server_error::Result<AdminResult> {
+ let expr = query
+ .clone()
+ .expr
+ .context(server_error::InvalidQuerySnafu {
+ reason: "empty expr",
+ })?;
+ match expr {
+ admin_expr::Expr::CreateDatabase(create_database) => self
+ .handle_create_database(create_database)
+ .await
+ .map(|_| AdminResultBuilder::default().mutate_result(1, 0).build()),
+ admin_expr::Expr::Alter(alter) => self.handle_alter_table(alter).await,
+ _ => unimplemented!(),
+ }
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteQuerySnafu {
+ query: format!("{:?}", query),
+ })
+ }
}
fn create_table_global_value(
@@ -454,12 +522,15 @@ fn find_partition_columns(
#[cfg(test)]
mod test {
+ use servers::query_handler::SqlQueryHandlerRef;
+ use session::context::QueryContext;
use sql::parser::ParserContext;
use sql::statements::statement::Statement;
use sqlparser::dialect::GenericDialect;
use super::*;
use crate::expr_factory::{CreateExprFactory, DefaultCreateExprFactory};
+ use crate::tests::create_dist_instance;
#[tokio::test]
async fn test_parse_partitions() {
@@ -492,9 +563,10 @@ ENGINE=mito",
let result = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
match &result[0] {
Statement::CreateTable(c) => {
- common_telemetry::info!("{}", sql);
- let factory = DefaultCreateExprFactory {};
- let expr = factory.create_expr_by_stmt(c).await.unwrap();
+ let expr = DefaultCreateExprFactory
+ .create_expr_by_stmt(c)
+ .await
+ .unwrap();
let partitions = parse_partitions(&expr, c.partitions.clone()).unwrap();
let json = serde_json::to_string(&partitions).unwrap();
assert_eq!(json, expected);
@@ -503,4 +575,103 @@ ENGINE=mito",
}
}
}
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_show_databases() {
+ let (dist_instance, _) = create_dist_instance().await;
+
+ let sql = "create database test_show_databases";
+ let output = dist_instance
+ .handle_sql(sql, QueryContext::arc())
+ .await
+ .unwrap();
+ match output {
+ Output::AffectedRows(rows) => assert_eq!(rows, 1),
+ _ => unreachable!(),
+ }
+
+ let sql = "show databases";
+ let output = dist_instance
+ .handle_sql(sql, QueryContext::arc())
+ .await
+ .unwrap();
+ match output {
+ Output::RecordBatches(r) => {
+ let expected1 = vec![
+ "+---------------------+",
+ "| Schemas |",
+ "+---------------------+",
+ "| public |",
+ "| test_show_databases |",
+ "+---------------------+",
+ ];
+ let expected2 = vec![
+ "+---------------------+",
+ "| Schemas |",
+ "+---------------------+",
+ "| test_show_databases |",
+ "| public |",
+ "+---------------------+",
+ ];
+ let pretty = r.pretty_print();
+ let lines = pretty.lines().collect::<Vec<_>>();
+ assert!(lines == expected1 || lines == expected2)
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_show_tables() {
+ let (dist_instance, datanode_instances) = create_dist_instance().await;
+
+ let sql = "create database test_show_tables";
+ dist_instance
+ .handle_sql(sql, QueryContext::arc())
+ .await
+ .unwrap();
+
+ let sql = "
+ CREATE TABLE greptime.test_show_tables.dist_numbers (
+ ts BIGINT,
+ n INT,
+ TIME INDEX (ts),
+ )
+ PARTITION BY RANGE COLUMNS (n) (
+ PARTITION r0 VALUES LESS THAN (10),
+ PARTITION r1 VALUES LESS THAN (20),
+ PARTITION r2 VALUES LESS THAN (50),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE),
+ )
+ ENGINE=mito";
+ dist_instance
+ .handle_sql(sql, QueryContext::arc())
+ .await
+ .unwrap();
+
+ async fn assert_show_tables(instance: SqlQueryHandlerRef) {
+ let sql = "show tables in test_show_tables";
+ let output = instance.do_query(sql, QueryContext::arc()).await.unwrap();
+ match output {
+ Output::RecordBatches(r) => {
+ let expected = vec![
+ "+--------------+",
+ "| Tables |",
+ "+--------------+",
+ "| dist_numbers |",
+ "+--------------+",
+ ];
+ assert_eq!(r.pretty_print().lines().collect::<Vec<_>>(), expected);
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ assert_show_tables(Arc::new(dist_instance)).await;
+
+        // Asserts that the new table is created in the Datanodes as well.
+ for x in datanode_instances.values() {
+ assert_show_tables(x.clone()).await
+ }
+ }
}
diff --git a/src/frontend/src/instance/opentsdb.rs b/src/frontend/src/instance/opentsdb.rs
index 66b04b13170f..e2c0c91ee088 100644
--- a/src/frontend/src/instance/opentsdb.rs
+++ b/src/frontend/src/instance/opentsdb.rs
@@ -19,7 +19,6 @@ use servers::query_handler::OpentsdbProtocolHandler;
use servers::{error as server_error, Mode};
use snafu::prelude::*;
-use crate::error::Result;
use crate::instance::Instance;
#[async_trait]
@@ -29,12 +28,7 @@ impl OpentsdbProtocolHandler for Instance {
// metric table and tags can be created upon insertion.
match self.mode {
Mode::Standalone => {
- self.insert_opentsdb_metric(data_point)
- .await
- .map_err(BoxedError::new)
- .with_context(|_| server_error::PutOpentsdbDataPointSnafu {
- data_point: format!("{:?}", data_point),
- })?;
+ self.insert_opentsdb_metric(data_point).await?;
}
Mode::Distributed => {
self.dist_insert(vec![data_point.as_grpc_insert()])
@@ -51,9 +45,14 @@ impl OpentsdbProtocolHandler for Instance {
}
impl Instance {
- async fn insert_opentsdb_metric(&self, data_point: &DataPoint) -> Result<()> {
- let expr = data_point.as_grpc_insert();
- self.handle_insert(expr).await?;
+ async fn insert_opentsdb_metric(&self, data_point: &DataPoint) -> server_error::Result<()> {
+ let insert_expr = data_point.as_grpc_insert();
+ self.handle_insert(insert_expr)
+ .await
+ .map_err(BoxedError::new)
+ .with_context(|_| server_error::ExecuteQuerySnafu {
+ query: format!("{:?}", data_point),
+ })?;
Ok(())
}
}
@@ -63,6 +62,7 @@ mod tests {
use std::sync::Arc;
use common_query::Output;
+ use common_recordbatch::RecordBatches;
use datafusion::arrow_print;
use servers::query_handler::SqlQueryHandler;
use session::context::QueryContext;
@@ -128,7 +128,8 @@ mod tests {
.await
.unwrap();
match output {
- Output::RecordBatches(recordbatches) => {
+ Output::Stream(stream) => {
+ let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
let recordbatches = recordbatches
.take()
.into_iter()
diff --git a/src/frontend/src/instance/prometheus.rs b/src/frontend/src/instance/prometheus.rs
index b6f322beb216..b1ad7ad53cf0 100644
--- a/src/frontend/src/instance/prometheus.rs
+++ b/src/frontend/src/instance/prometheus.rs
@@ -17,11 +17,10 @@ use std::sync::Arc;
use api::prometheus::remote::read_request::ResponseType;
use api::prometheus::remote::{Query, QueryResult, ReadRequest, ReadResponse, WriteRequest};
use async_trait::async_trait;
-use client::{ObjectResult, Select};
+use client::ObjectResult;
use common_error::prelude::BoxedError;
use common_grpc::select::to_object_result;
use common_telemetry::logging;
-use futures_util::TryFutureExt;
use prost::Message;
use servers::error::{self, Result as ServerResult};
use servers::prometheus::{self, Metrics};
@@ -30,7 +29,7 @@ use servers::Mode;
use session::context::QueryContext;
use snafu::{OptionExt, ResultExt};
-use crate::instance::{parse_stmt, Instance};
+use crate::instance::Instance;
const SAMPLES_RESPONSE_TYPE: i32 = ResponseType::Samples as i32;
@@ -94,19 +93,14 @@ impl Instance {
sql
);
- let object_result = if let Some(dist_instance) = &self.dist_instance {
- let output = futures::future::ready(parse_stmt(&sql))
- .and_then(|stmt| {
- let query_ctx = Arc::new(QueryContext::with_current_schema(db.to_string()));
- dist_instance.handle_sql(&sql, stmt, query_ctx)
- })
- .await;
- to_object_result(output).await.try_into()
- } else {
- self.database(db).select(Select::Sql(sql.clone())).await
- }
- .map_err(BoxedError::new)
- .context(error::ExecuteQuerySnafu { query: sql })?;
+ let query_ctx = Arc::new(QueryContext::with_current_schema(db.to_string()));
+ let output = self.sql_handler.do_query(&sql, query_ctx).await;
+
+ let object_result = to_object_result(output)
+ .await
+ .try_into()
+ .map_err(BoxedError::new)
+ .context(error::ExecuteQuerySnafu { query: sql })?;
results.push((table_name, object_result));
}
@@ -117,34 +111,25 @@ impl Instance {
#[async_trait]
impl PrometheusProtocolHandler for Instance {
async fn write(&self, database: &str, request: WriteRequest) -> ServerResult<()> {
+ let exprs = prometheus::write_request_to_insert_exprs(database, request.clone())?;
match self.mode {
Mode::Standalone => {
- let exprs = prometheus::write_request_to_insert_exprs(database, request)?;
- let futures = exprs
- .into_iter()
- .map(|e| self.handle_insert(e))
- .collect::<Vec<_>>();
- let res = futures_util::future::join_all(futures)
+ self.handle_inserts(exprs)
.await
- .into_iter()
- .collect::<Result<Vec<_>, crate::error::Error>>();
- res.map_err(BoxedError::new)
- .context(error::ExecuteInsertSnafu {
- msg: "failed to write prometheus remote request",
+ .map_err(BoxedError::new)
+ .with_context(|_| error::ExecuteInsertSnafu {
+ msg: format!("{:?}", request),
})?;
}
Mode::Distributed => {
- let inserts = prometheus::write_request_to_insert_exprs(database, request)?;
-
- self.dist_insert(inserts)
+ self.dist_insert(exprs)
.await
.map_err(BoxedError::new)
- .context(error::ExecuteInsertSnafu {
- msg: "execute insert failed",
+ .with_context(|_| error::ExecuteInsertSnafu {
+ msg: format!("{:?}", request),
})?;
}
}
-
Ok(())
}
diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs
index 36d229a24512..ac97d2dc3ca2 100644
--- a/src/frontend/src/table.rs
+++ b/src/frontend/src/table.rs
@@ -505,42 +505,33 @@ impl PartitionExec {
}
}
-// FIXME(LFC): no allow, for clippy temporarily
-#[allow(clippy::print_stdout)]
#[cfg(test)]
mod test {
- use std::time::Duration;
-
use api::v1::column::SemanticType;
use api::v1::{column, Column, ColumnDataType};
- use catalog::remote::MetaKvBackend;
- use common_recordbatch::util;
- use datafusion::arrow_print;
- use datafusion_common::record_batch::RecordBatch as DfRecordBatch;
+ use common_query::physical_plan::DfPhysicalPlanAdapter;
+ use common_recordbatch::adapter::RecordBatchStreamAdapter;
+ use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec;
+ use datafusion::physical_plan::expressions::{col as physical_col, PhysicalSortExpr};
+ use datafusion::physical_plan::sorts::sort::SortExec;
+ use datafusion::physical_plan::ExecutionPlan;
use datafusion_expr::expr_fn::{and, binary_expr, col, or};
use datafusion_expr::lit;
- use datanode::datanode::{DatanodeOptions, ObjectStoreConfig};
use datanode::instance::Instance;
+ use datatypes::arrow::compute::sort::SortOptions;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
- use meta_client::client::{MetaClient, MetaClientBuilder};
+ use meta_client::client::MetaClient;
use meta_client::rpc::router::RegionRoute;
use meta_client::rpc::{Region, Table, TableRoute};
- use meta_srv::metasrv::MetaSrvOptions;
- use meta_srv::mocks::MockInfo;
- use meta_srv::service::store::kv::KvStoreRef;
- use meta_srv::service::store::memory::MemStore;
use sql::parser::ParserContext;
use sql::statements::statement::Statement;
use sqlparser::dialect::GenericDialect;
use table::metadata::{TableInfoBuilder, TableMetaBuilder};
use table::TableRef;
- use tempdir::TempDir;
use super::*;
- use crate::catalog::FrontendCatalogManager;
use crate::expr_factory::{CreateExprFactory, DefaultCreateExprFactory};
- use crate::instance::distributed::DistInstance;
use crate::partitioning::range::RangePartitionRule;
#[tokio::test(flavor = "multi_thread")]
@@ -741,29 +732,78 @@ mod test {
#[tokio::test(flavor = "multi_thread")]
async fn test_dist_table_scan() {
- common_telemetry::init_default_ut_logging();
let table = Arc::new(new_dist_table().await);
// should scan all regions
- // select * from numbers
- let projection = None;
+ // select a, row_id from numbers
+ let projection = Some(vec![1, 2]);
let filters = vec![];
- exec_table_scan(table.clone(), projection, filters, None).await;
- println!();
+ let expected_output = vec![
+ "+-----+--------+",
+ "| a | row_id |",
+ "+-----+--------+",
+ "| 0 | 1 |",
+ "| 1 | 2 |",
+ "| 2 | 3 |",
+ "| 3 | 4 |",
+ "| 4 | 5 |",
+ "| 10 | 1 |",
+ "| 11 | 2 |",
+ "| 12 | 3 |",
+ "| 13 | 4 |",
+ "| 14 | 5 |",
+ "| 30 | 1 |",
+ "| 31 | 2 |",
+ "| 32 | 3 |",
+ "| 33 | 4 |",
+ "| 34 | 5 |",
+ "| 100 | 1 |",
+ "| 101 | 2 |",
+ "| 102 | 3 |",
+ "| 103 | 4 |",
+ "| 104 | 5 |",
+ "+-----+--------+",
+ ];
+ exec_table_scan(table.clone(), projection, filters, 4, expected_output).await;
// should scan only region 1
// select a, row_id from numbers where a < 10
let projection = Some(vec![1, 2]);
let filters = vec![binary_expr(col("a"), Operator::Lt, lit(10)).into()];
- exec_table_scan(table.clone(), projection, filters, None).await;
- println!();
+ let expected_output = vec![
+ "+---+--------+",
+ "| a | row_id |",
+ "+---+--------+",
+ "| 0 | 1 |",
+ "| 1 | 2 |",
+ "| 2 | 3 |",
+ "| 3 | 4 |",
+ "| 4 | 5 |",
+ "+---+--------+",
+ ];
+ exec_table_scan(table.clone(), projection, filters, 1, expected_output).await;
// should scan region 1 and 2
// select a, row_id from numbers where a < 15
let projection = Some(vec![1, 2]);
let filters = vec![binary_expr(col("a"), Operator::Lt, lit(15)).into()];
- exec_table_scan(table.clone(), projection, filters, None).await;
- println!();
+ let expected_output = vec![
+ "+----+--------+",
+ "| a | row_id |",
+ "+----+--------+",
+ "| 0 | 1 |",
+ "| 1 | 2 |",
+ "| 2 | 3 |",
+ "| 3 | 4 |",
+ "| 4 | 5 |",
+ "| 10 | 1 |",
+ "| 11 | 2 |",
+ "| 12 | 3 |",
+ "| 13 | 4 |",
+ "| 14 | 5 |",
+ "+----+--------+",
+ ];
+ exec_table_scan(table.clone(), projection, filters, 2, expected_output).await;
// should scan region 2 and 3
// select a, row_id from numbers where a < 40 and a >= 10
@@ -773,8 +813,23 @@ mod test {
binary_expr(col("a"), Operator::GtEq, lit(10)),
)
.into()];
- exec_table_scan(table.clone(), projection, filters, None).await;
- println!();
+ let expected_output = vec![
+ "+----+--------+",
+ "| a | row_id |",
+ "+----+--------+",
+ "| 10 | 1 |",
+ "| 11 | 2 |",
+ "| 12 | 3 |",
+ "| 13 | 4 |",
+ "| 14 | 5 |",
+ "| 30 | 1 |",
+ "| 31 | 2 |",
+ "| 32 | 3 |",
+ "| 33 | 4 |",
+ "| 34 | 5 |",
+ "+----+--------+",
+ ];
+ exec_table_scan(table.clone(), projection, filters, 2, expected_output).await;
// should scan all regions
// select a, row_id from numbers where a < 1000 and row_id == 1
@@ -784,36 +839,59 @@ mod test {
binary_expr(col("row_id"), Operator::Eq, lit(1)),
)
.into()];
- exec_table_scan(table.clone(), projection, filters, None).await;
+ let expected_output = vec![
+ "+-----+--------+",
+ "| a | row_id |",
+ "+-----+--------+",
+ "| 0 | 1 |",
+ "| 10 | 1 |",
+ "| 30 | 1 |",
+ "| 100 | 1 |",
+ "+-----+--------+",
+ ];
+ exec_table_scan(table.clone(), projection, filters, 4, expected_output).await;
}
async fn exec_table_scan(
table: TableRef,
projection: Option<Vec<usize>>,
filters: Vec<Expr>,
- limit: Option<usize>,
+ expected_partitions: usize,
+ expected_output: Vec<&str>,
) {
let table_scan = table
- .scan(&projection, filters.as_slice(), limit)
+ .scan(&projection, filters.as_slice(), None)
.await
.unwrap();
+ assert_eq!(
+ table_scan.output_partitioning().partition_count(),
+ expected_partitions
+ );
- for partition in 0..table_scan.output_partitioning().partition_count() {
- let result = table_scan
- .execute(partition, Arc::new(RuntimeEnv::default()))
- .unwrap();
- let recordbatches = util::collect(result).await.unwrap();
+ let merge =
+ CoalescePartitionsExec::new(Arc::new(DfPhysicalPlanAdapter(table_scan.clone())));
- let df_recordbatch = recordbatches
- .into_iter()
- .map(|r| r.df_recordbatch)
- .collect::<Vec<DfRecordBatch>>();
+ let sort = SortExec::try_new(
+ vec![PhysicalSortExpr {
+ expr: physical_col("a", table_scan.schema().arrow_schema()).unwrap(),
+ options: SortOptions::default(),
+ }],
+ Arc::new(merge),
+ )
+ .unwrap();
+ assert_eq!(sort.output_partitioning().partition_count(), 1);
- println!("DataFusion partition {}:", partition);
- let pretty_print = arrow_print::write(&df_recordbatch);
- let pretty_print = pretty_print.lines().collect::<Vec<&str>>();
- pretty_print.iter().for_each(|x| println!("{}", x));
- }
+ let stream = sort
+ .execute(0, Arc::new(RuntimeEnv::default()))
+ .await
+ .unwrap();
+ let stream = Box::pin(RecordBatchStreamAdapter::try_new(stream).unwrap());
+
+ let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
+ assert_eq!(
+ recordbatches.pretty_print().lines().collect::<Vec<_>>(),
+ expected_output
+ );
}
async fn new_dist_table() -> DistTable {
@@ -824,52 +902,13 @@ mod test {
];
let schema = Arc::new(Schema::new(column_schemas.clone()));
- let kv_store: KvStoreRef = Arc::new(MemStore::default()) as _;
- let meta_srv =
- meta_srv::mocks::mock(MetaSrvOptions::default(), kv_store.clone(), None).await;
-
- let datanode_clients = Arc::new(DatanodeClients::new());
-
- let mut datanode_instances = HashMap::new();
- for datanode_id in 1..=4 {
- let dn_instance = create_datanode_instance(datanode_id, meta_srv.clone()).await;
- datanode_instances.insert(datanode_id, dn_instance.clone());
-
- let (addr, client) = crate::tests::create_datanode_client(dn_instance).await;
- datanode_clients
- .insert_client(Peer::new(datanode_id, addr), client)
- .await;
- }
-
- let MockInfo {
- server_addr,
- channel_manager,
- } = meta_srv.clone();
- let mut meta_client = MetaClientBuilder::new(1000, 0)
- .enable_router()
- .enable_store()
- .channel_manager(channel_manager)
- .build();
- meta_client.start(&[&server_addr]).await.unwrap();
- let meta_client = Arc::new(meta_client);
+ let (dist_instance, datanode_instances) = crate::tests::create_dist_instance().await;
+ let catalog_manager = dist_instance.catalog_manager();
+ let table_routes = catalog_manager.table_routes();
+ let datanode_clients = catalog_manager.datanode_clients();
let table_name = TableName::new("greptime", "public", "dist_numbers");
- let meta_backend = Arc::new(MetaKvBackend {
- client: meta_client.clone(),
- });
- let table_routes = Arc::new(TableRoutes::new(meta_client.clone()));
- let catalog_manager = Arc::new(FrontendCatalogManager::new(
- meta_backend,
- table_routes.clone(),
- datanode_clients.clone(),
- ));
- let dist_instance = DistInstance::new(
- meta_client.clone(),
- catalog_manager,
- datanode_clients.clone(),
- );
-
let sql = "
CREATE TABLE greptime.public.dist_numbers (
ts BIGINT,
@@ -893,17 +932,16 @@ mod test {
_ => unreachable!(),
};
- wait_datanodes_alive(kv_store).await;
-
- let factory = DefaultCreateExprFactory {};
- let mut expr = factory.create_expr_by_stmt(&create_table).await.unwrap();
+ let mut expr = DefaultCreateExprFactory
+ .create_expr_by_stmt(&create_table)
+ .await
+ .unwrap();
let _result = dist_instance
.create_table(&mut expr, create_table.partitions)
.await
.unwrap();
let table_route = table_routes.get_route(&table_name).await.unwrap();
- println!("{}", serde_json::to_string_pretty(&table_route).unwrap());
let mut region_to_datanode_mapping = HashMap::new();
for region_route in table_route.region_routes.iter() {
@@ -948,20 +986,6 @@ mod test {
}
}
- async fn wait_datanodes_alive(kv_store: KvStoreRef) {
- let wait = 10;
- for _ in 0..wait {
- let datanodes = meta_srv::lease::alive_datanodes(1000, &kv_store, |_, _| true)
- .await
- .unwrap();
- if datanodes.len() >= 4 {
- return;
- }
- tokio::time::sleep(Duration::from_secs(1)).await
- }
- panic!()
- }
-
async fn insert_testing_data(
table_name: &TableName,
dn_instance: Arc<Instance>,
@@ -1013,30 +1037,6 @@ mod test {
.unwrap();
}
- async fn create_datanode_instance(datanode_id: u64, meta_srv: MockInfo) -> Arc<Instance> {
- let current = common_time::util::current_time_millis();
- let wal_tmp_dir =
- TempDir::new_in("/tmp", &format!("dist_table_test-wal-{}", current)).unwrap();
- let data_tmp_dir =
- TempDir::new_in("/tmp", &format!("dist_table_test-data-{}", current)).unwrap();
- let opts = DatanodeOptions {
- node_id: Some(datanode_id),
- wal_dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
- storage: ObjectStoreConfig::File {
- data_dir: data_tmp_dir.path().to_str().unwrap().to_string(),
- },
- ..Default::default()
- };
-
- let instance = Arc::new(
- Instance::with_mock_meta_server(&opts, meta_srv)
- .await
- .unwrap(),
- );
- instance.start().await.unwrap();
- instance
- }
-
#[tokio::test(flavor = "multi_thread")]
async fn test_find_regions() {
let schema = Arc::new(Schema::new(vec![ColumnSchema::new(
diff --git a/src/frontend/src/tests.rs b/src/frontend/src/tests.rs
index 7e59bb3908cf..4cb1360fea5f 100644
--- a/src/frontend/src/tests.rs
+++ b/src/frontend/src/tests.rs
@@ -12,17 +12,32 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::HashMap;
use std::sync::Arc;
+use std::time::Duration;
+use catalog::remote::MetaKvBackend;
use client::Client;
use common_grpc::channel_manager::ChannelManager;
use common_runtime::Builder as RuntimeBuilder;
+use datanode::datanode::{DatanodeOptions, ObjectStoreConfig};
use datanode::instance::Instance as DatanodeInstance;
+use meta_client::client::MetaClientBuilder;
+use meta_client::rpc::Peer;
+use meta_srv::metasrv::MetaSrvOptions;
+use meta_srv::mocks::MockInfo;
+use meta_srv::service::store::kv::KvStoreRef;
+use meta_srv::service::store::memory::MemStore;
use servers::grpc::GrpcServer;
+use tempdir::TempDir;
use tonic::transport::Server;
use tower::service_fn;
+use crate::catalog::FrontendCatalogManager;
+use crate::datanode::DatanodeClients;
+use crate::instance::distributed::DistInstance;
use crate::instance::Instance;
+use crate::table::route::TableRoutes;
async fn create_datanode_instance() -> Arc<DatanodeInstance> {
// TODO(LFC) Use real Mito engine when we can alter its region schema,
@@ -35,11 +50,10 @@ async fn create_datanode_instance() -> Arc<DatanodeInstance> {
pub(crate) async fn create_frontend_instance() -> Arc<Instance> {
let datanode_instance: Arc<DatanodeInstance> = create_datanode_instance().await;
let dn_catalog_manager = datanode_instance.catalog_manager().clone();
- let (_, client) = create_datanode_client(datanode_instance).await;
- Arc::new(Instance::with_client_and_catalog_manager(
- client,
- dn_catalog_manager,
- ))
+
+ let mut frontend_instance = Instance::new_standalone(datanode_instance);
+ frontend_instance.set_catalog_manager(dn_catalog_manager);
+ Arc::new(frontend_instance)
}
pub(crate) async fn create_datanode_client(
@@ -96,3 +110,91 @@ pub(crate) async fn create_datanode_client(
Client::with_manager_and_urls(channel_manager, vec![addr]),
)
}
+
+async fn create_dist_datanode_instance(
+ datanode_id: u64,
+ meta_srv: MockInfo,
+) -> Arc<DatanodeInstance> {
+ let current = common_time::util::current_time_millis();
+ let wal_tmp_dir = TempDir::new_in("/tmp", &format!("dist_datanode-wal-{}", current)).unwrap();
+ let data_tmp_dir = TempDir::new_in("/tmp", &format!("dist_datanode-data-{}", current)).unwrap();
+ let opts = DatanodeOptions {
+ node_id: Some(datanode_id),
+ wal_dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
+ storage: ObjectStoreConfig::File {
+ data_dir: data_tmp_dir.path().to_str().unwrap().to_string(),
+ },
+ ..Default::default()
+ };
+
+ let instance = Arc::new(
+ DatanodeInstance::with_mock_meta_server(&opts, meta_srv)
+ .await
+ .unwrap(),
+ );
+ instance.start().await.unwrap();
+ instance
+}
+
+async fn wait_datanodes_alive(kv_store: KvStoreRef) {
+ let wait = 10;
+ for _ in 0..wait {
+ let datanodes = meta_srv::lease::alive_datanodes(1000, &kv_store, |_, _| true)
+ .await
+ .unwrap();
+ if datanodes.len() >= 4 {
+ return;
+ }
+ tokio::time::sleep(Duration::from_secs(1)).await
+ }
+ panic!()
+}
+
+pub(crate) async fn create_dist_instance() -> (DistInstance, HashMap<u64, Arc<DatanodeInstance>>) {
+ let kv_store: KvStoreRef = Arc::new(MemStore::default()) as _;
+ let meta_srv = meta_srv::mocks::mock(MetaSrvOptions::default(), kv_store.clone(), None).await;
+
+ let datanode_clients = Arc::new(DatanodeClients::new());
+
+ let mut datanode_instances = HashMap::new();
+ for datanode_id in 1..=4 {
+ let dn_instance = create_dist_datanode_instance(datanode_id, meta_srv.clone()).await;
+ datanode_instances.insert(datanode_id, dn_instance.clone());
+
+ let (addr, client) = create_datanode_client(dn_instance).await;
+ datanode_clients
+ .insert_client(Peer::new(datanode_id, addr), client)
+ .await;
+ }
+
+ let MockInfo {
+ server_addr,
+ channel_manager,
+ } = meta_srv.clone();
+ let mut meta_client = MetaClientBuilder::new(1000, 0)
+ .enable_router()
+ .enable_store()
+ .channel_manager(channel_manager)
+ .build();
+ meta_client.start(&[&server_addr]).await.unwrap();
+ let meta_client = Arc::new(meta_client);
+
+ let meta_backend = Arc::new(MetaKvBackend {
+ client: meta_client.clone(),
+ });
+ let table_routes = Arc::new(TableRoutes::new(meta_client.clone()));
+ let catalog_manager = Arc::new(FrontendCatalogManager::new(
+ meta_backend,
+ table_routes.clone(),
+ datanode_clients.clone(),
+ ));
+
+ wait_datanodes_alive(kv_store).await;
+
+ let dist_instance = DistInstance::new(
+ meta_client.clone(),
+ catalog_manager,
+ datanode_clients.clone(),
+ );
+ (dist_instance, datanode_instances)
+}
diff --git a/src/script/src/python/builtins/mod.rs b/src/script/src/python/builtins/mod.rs
index 5bee6e5577c8..679d91289b52 100644
--- a/src/script/src/python/builtins/mod.rs
+++ b/src/script/src/python/builtins/mod.rs
@@ -15,7 +15,6 @@
//! Builtin module contains GreptimeDB builtin udf/udaf
#[cfg(test)]
-#[allow(clippy::print_stdout)]
mod test;
use datafusion_common::{DataFusionError, ScalarValue};
diff --git a/src/script/src/python/builtins/test.rs b/src/script/src/python/builtins/test.rs
index 8fdeb9ad9439..39caf399e222 100644
--- a/src/script/src/python/builtins/test.rs
+++ b/src/script/src/python/builtins/test.rs
@@ -18,6 +18,7 @@ use std::io::Read;
use std::path::Path;
use std::sync::Arc;
+use common_telemetry::{error, info};
use datatypes::arrow::array::{Float64Array, Int64Array, PrimitiveArray};
use datatypes::arrow::compute::cast::CastOptions;
use datatypes::arrow::datatypes::DataType;
@@ -331,6 +332,8 @@ impl PyValue {
#[test]
fn run_builtin_fn_testcases() {
+ common_telemetry::init_default_ut_logging();
+
let loc = Path::new("src/python/builtins/testcases.ron");
let loc = loc.to_str().expect("Fail to parse path");
let mut file = File::open(loc).expect("Fail to open file");
@@ -343,7 +346,7 @@ fn run_builtin_fn_testcases() {
PyVector::make_class(&vm.ctx);
});
for (idx, case) in testcases.into_iter().enumerate() {
- print!("Testcase {idx} ...");
+ info!("Testcase {idx} ...");
cached_vm
.enter(|vm| {
let scope = vm.new_scope_with_builtins();
@@ -368,7 +371,7 @@ fn run_builtin_fn_testcases() {
let err_res = format_py_error(e, vm).to_string();
match case.expect{
Ok(v) => {
- println!("\nError:\n{err_res}");
+ error!("\nError:\n{err_res}");
panic!("Expect Ok: {v:?}, found Error");
},
Err(err) => {
@@ -397,7 +400,6 @@ fn run_builtin_fn_testcases() {
}
};
});
- println!(" passed!");
}
}
@@ -443,6 +445,8 @@ fn set_lst_of_vecs_in_scope(
#[allow(unused_must_use)]
#[test]
fn test_vm() {
+ common_telemetry::init_default_ut_logging();
+
rustpython_vm::Interpreter::with_init(Default::default(), |vm| {
vm.add_native_module("udf_builtins", Box::new(greptime_builtin::make_module));
        // this can be in the `.enter()` closure, but for clarity, put it in `with_init()`
@@ -471,11 +475,10 @@ sin(values)"#,
.map_err(|err| vm.new_syntax_error(&err))
.unwrap();
let res = vm.run_code_obj(code_obj, scope);
- println!("{:#?}", res);
match res {
Err(e) => {
let err_res = format_py_error(e, vm).to_string();
- println!("Error:\n{err_res}");
+ error!("Error:\n{err_res}");
}
Ok(obj) => {
let _ser = PyValue::from_py_obj(&obj, vm);
diff --git a/src/script/src/python/test.rs b/src/script/src/python/test.rs
index 5790ce281c5b..4c0bcdcd25f3 100644
--- a/src/script/src/python/test.rs
+++ b/src/script/src/python/test.rs
@@ -20,6 +20,7 @@ use std::io::prelude::*;
use std::path::Path;
use std::sync::Arc;
+use common_telemetry::{error, info};
use console::style;
use datafusion_common::record_batch::RecordBatch as DfRecordBatch;
use datatypes::arrow::array::PrimitiveArray;
@@ -82,6 +83,8 @@ fn create_sample_recordbatch() -> DfRecordBatch {
/// and exec/parse (depending on the type of predicate) then decide if result is as expected
#[test]
fn run_ron_testcases() {
+ common_telemetry::init_default_ut_logging();
+
let loc = Path::new("src/python/testcases.ron");
let loc = loc.to_str().expect("Fail to parse path");
let mut file = File::open(loc).expect("Fail to open file");
@@ -89,9 +92,9 @@ fn run_ron_testcases() {
file.read_to_string(&mut buf)
.expect("Fail to read to string");
let testcases: Vec<TestCase> = from_ron_string(&buf).expect("Fail to convert to testcases");
- println!("Read {} testcases from {}", testcases.len(), loc);
+ info!("Read {} testcases from {}", testcases.len(), loc);
for testcase in testcases {
- print!(".ron test {}", testcase.name);
+ info!(".ron test {}", testcase.name);
match testcase.predicate {
Predicate::ParseIsOk { result } => {
let copr = parse_and_compile_copr(&testcase.code);
@@ -101,21 +104,19 @@ fn run_ron_testcases() {
}
Predicate::ParseIsErr { reason } => {
let copr = parse_and_compile_copr(&testcase.code);
- if copr.is_ok() {
- eprintln!("Expect to be err, found{copr:#?}");
- panic!()
- }
+ assert!(copr.is_err(), "Expect to be err, actual {copr:#?}");
+
let res = &copr.unwrap_err();
- println!(
+ error!(
"{}",
pretty_print_error_in_src(&testcase.code, res, 0, "<embedded>")
);
let (res, _) = get_error_reason_loc(res);
- if !res.contains(&reason) {
- eprintln!("{}", testcase.code);
- eprintln!("Parse Error, expect \"{reason}\" in \"{res}\", but not found.");
- panic!()
- }
+ assert!(
+ res.contains(&reason),
+ "{} Parse Error, expect \"{reason}\" in \"{res}\", actual not found.",
+ testcase.code,
+ );
}
Predicate::ExecIsOk { fields, columns } => {
let rb = create_sample_recordbatch();
@@ -129,28 +130,25 @@ fn run_ron_testcases() {
.iter()
.zip(&res.schema.arrow_schema().fields)
.map(|(anno, real)| {
- if !(anno.datatype.clone().unwrap() == real.data_type
- && anno.is_nullable == real.is_nullable)
- {
- eprintln!("fields expect to be {anno:#?}, found to be {real:#?}.");
- panic!()
- }
+ assert!(
+ anno.datatype.clone().unwrap() == real.data_type
+ && anno.is_nullable == real.is_nullable,
+ "Fields expected to be {anno:#?}, actual {real:#?}"
+ );
})
.count();
columns
.iter()
.zip(res.df_recordbatch.columns())
.map(|(anno, real)| {
- if !(&anno.ty == real.data_type() && anno.len == real.len()) {
- eprintln!(
- "Unmatch type or length!Expect [{:#?}; {}], found [{:#?}; {}]",
- anno.ty,
- anno.len,
- real.data_type(),
- real.len()
- );
- panic!()
- }
+ assert!(
+ &anno.ty == real.data_type() && anno.len == real.len(),
+ "Type or length not match! Expect [{:#?}; {}], actual [{:#?}; {}]",
+ anno.ty,
+ anno.len,
+ real.data_type(),
+ real.len()
+ );
})
.count();
}
@@ -159,28 +157,24 @@ fn run_ron_testcases() {
} => {
let rb = create_sample_recordbatch();
let res = coprocessor::exec_coprocessor(&testcase.code, &rb);
+ assert!(res.is_err(), "{:#?}\nExpect Err(...), actual Ok(...)", res);
if let Err(res) = res {
- println!(
+ error!(
"{}",
pretty_print_error_in_src(&testcase.code, &res, 1120, "<embedded>")
);
let (reason, _) = get_error_reason_loc(&res);
- if !reason.contains(&part_reason) {
- eprintln!(
- "{}\nExecute error, expect \"{reason}\" in \"{res}\", but not found.",
- testcase.code,
- reason = style(reason).green(),
- res = style(res).red()
- );
- panic!()
- }
- } else {
- eprintln!("{:#?}\nExpect Err(...), found Ok(...)", res);
- panic!();
+ assert!(
+ reason.contains(&part_reason),
+ "{}\nExecute error, expect \"{reason}\" in \"{res}\", actual not found.",
+ testcase.code,
+ reason = style(reason).green(),
+ res = style(res).red()
+ )
}
}
}
- println!(" ... {}", style("ok✅").green());
+ info!(" ... {}", style("ok✅").green());
}
}
@@ -275,7 +269,7 @@ def calc_rvs(open_time, close):
0,
"copr.py",
);
- println!("{res}");
+ info!("{res}");
} else if let Ok(res) = ret {
dbg!(&res);
} else {
@@ -319,7 +313,7 @@ def a(cpu, mem):
0,
"copr.py",
);
- println!("{res}");
+ info!("{res}");
} else if let Ok(res) = ret {
dbg!(&res);
} else {
diff --git a/src/script/src/python/vector.rs b/src/script/src/python/vector.rs
index 951ad2f9537e..448df3e62eed 100644
--- a/src/script/src/python/vector.rs
+++ b/src/script/src/python/vector.rs
@@ -1039,6 +1039,7 @@ pub mod tests {
use std::sync::Arc;
+ use common_telemetry::info;
use datatypes::vectors::{Float32Vector, Int32Vector, NullVector};
use rustpython_vm::builtins::PyList;
use rustpython_vm::class::PyClassImpl;
@@ -1170,9 +1171,10 @@ pub mod tests {
}
#[test]
- #[allow(clippy::print_stdout)]
    // for debug purposes; also, this is already a test function, so allowing print_stdout shouldn't be a problem
fn test_execute_script() {
+ common_telemetry::init_default_ut_logging();
+
fn is_eq<T: std::cmp::PartialEq + rustpython_vm::TryFromObject>(
v: PyResult,
i: T,
@@ -1221,7 +1223,7 @@ pub mod tests {
for (code, pred) in snippet {
let result = execute_script(&interpreter, code, None, pred);
- println!(
+ info!(
"\u{001B}[35m{code}\u{001B}[0m: {:?}{}",
result.clone().map(|v| v.0),
result
diff --git a/src/session/src/context.rs b/src/session/src/context.rs
index aec55ac94117..2a6f9bbe7295 100644
--- a/src/session/src/context.rs
+++ b/src/session/src/context.rs
@@ -30,6 +30,10 @@ impl Default for QueryContext {
}
impl QueryContext {
+ pub fn arc() -> QueryContextRef {
+ Arc::new(QueryContext::new())
+ }
+
pub fn new() -> Self {
Self {
current_schema: ArcSwapOption::new(None),
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index 70a3355f3dc2..501879023b0c 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -29,8 +29,6 @@ use datanode::instance::{Instance, InstanceRef};
use datanode::sql::SqlHandler;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, SchemaBuilder};
-use frontend::frontend::FrontendOptions;
-use frontend::grpc::GrpcOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use object_store::backend::s3;
use object_store::test_util::TempFolder;
@@ -215,8 +213,7 @@ pub async fn create_test_table(
}
async fn build_frontend_instance(datanode_instance: InstanceRef) -> FeInstance {
- let fe_opts = FrontendOptions::default();
- let mut frontend_instance = FeInstance::try_new(&fe_opts).await.unwrap();
+ let mut frontend_instance = FeInstance::new_standalone(datanode_instance.clone());
frontend_instance.set_catalog_manager(datanode_instance.catalog_manager().clone());
frontend_instance.set_script_handler(datanode_instance);
frontend_instance
@@ -262,19 +259,13 @@ pub async fn setup_test_app_with_frontend(
pub async fn setup_grpc_server(
store_type: StorageType,
name: &str,
-) -> (String, TestGuard, Arc<GrpcServer>, Arc<GrpcServer>) {
+) -> (String, TestGuard, Arc<GrpcServer>) {
common_telemetry::init_default_ut_logging();
- let datanode_port = get_port();
- let frontend_port = get_port();
-
- let (mut opts, guard) = create_tmp_dir_and_datanode_opts(store_type, name);
- let datanode_grpc_addr = format!("127.0.0.1:{}", datanode_port);
- opts.rpc_addr = datanode_grpc_addr.clone();
+ let (opts, guard) = create_tmp_dir_and_datanode_opts(store_type, name);
let instance = Arc::new(Instance::with_mock_meta_client(&opts).await.unwrap());
instance.start().await.unwrap();
- let datanode_grpc_addr = datanode_grpc_addr.clone();
let runtime = Arc::new(
RuntimeBuilder::default()
.worker_threads(2)
@@ -283,26 +274,9 @@ pub async fn setup_grpc_server(
.unwrap(),
);
- let fe_grpc_addr = format!("127.0.0.1:{}", frontend_port);
- let fe_opts = FrontendOptions {
- mode: Mode::Standalone,
- datanode_rpc_addr: datanode_grpc_addr.clone(),
- grpc_options: Some(GrpcOptions {
- addr: fe_grpc_addr.clone(),
- runtime_size: 8,
- }),
- ..Default::default()
- };
-
- let datanode_grpc_server = Arc::new(GrpcServer::new(
- instance.clone(),
- instance.clone(),
- runtime.clone(),
- ));
+ let fe_grpc_addr = format!("127.0.0.1:{}", get_port());
- let mut fe_instance = frontend::instance::Instance::try_new(&fe_opts)
- .await
- .unwrap();
+ let mut fe_instance = frontend::instance::Instance::new_standalone(instance.clone());
fe_instance.set_catalog_manager(instance.catalog_manager().clone());
let fe_instance_ref = Arc::new(fe_instance);
@@ -319,15 +293,8 @@ pub async fn setup_grpc_server(
grpc_server_clone.start(addr).await.unwrap()
});
- let dn_grpc_addr_clone = datanode_grpc_addr.clone();
- let dn_grpc_server_clone = datanode_grpc_server.clone();
- tokio::spawn(async move {
- let addr = dn_grpc_addr_clone.parse::<SocketAddr>().unwrap();
- dn_grpc_server_clone.start(addr).await.unwrap()
- });
-
// wait for GRPC server to start
tokio::time::sleep(Duration::from_secs(1)).await;
- (fe_grpc_addr, guard, fe_grpc_server, datanode_grpc_server)
+ (fe_grpc_addr, guard, fe_grpc_server)
}
diff --git a/tests-integration/tests/grpc.rs b/tests-integration/tests/grpc.rs
index cf6ba4b922f9..6f94aff3e54a 100644
--- a/tests-integration/tests/grpc.rs
+++ b/tests-integration/tests/grpc.rs
@@ -61,14 +61,13 @@ macro_rules! grpc_tests {
}
pub async fn test_auto_create_table(store_type: StorageType) {
- let (addr, mut guard, fe_grpc_server, dn_grpc_server) =
+ let (addr, mut guard, fe_grpc_server) =
setup_grpc_server(store_type, "auto_create_table").await;
let grpc_client = Client::with_urls(vec![addr]);
let db = Database::new("greptime", grpc_client);
insert_and_assert(&db).await;
let _ = fe_grpc_server.shutdown().await;
- let _ = dn_grpc_server.shutdown().await;
guard.remove_all().await;
}
@@ -128,7 +127,7 @@ fn expect_data() -> (Column, Column, Column, Column) {
pub async fn test_insert_and_select(store_type: StorageType) {
common_telemetry::init_default_ut_logging();
- let (addr, mut guard, fe_grpc_server, dn_grpc_server) =
+ let (addr, mut guard, fe_grpc_server) =
setup_grpc_server(store_type, "insert_and_select").await;
let grpc_client = Client::with_urls(vec![addr]);
@@ -173,7 +172,6 @@ pub async fn test_insert_and_select(store_type: StorageType) {
insert_and_assert(&db).await;
let _ = fe_grpc_server.shutdown().await;
- let _ = dn_grpc_server.shutdown().await;
guard.remove_all().await;
}
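
For orientation, the refactor above replaces the frontend's self-gRPC path with direct in-process delegation: in standalone mode the frontend holds the datanode behind handler traits (akin to the SqlQueryHandler/GrpcQueryHandler/GrpcAdminHandler wiring in the diff) and simply forwards requests. Below is a minimal, self-contained Rust sketch of that delegation pattern; the trait, struct, and method names are simplified stand-ins, not the actual GreptimeDB APIs.

use std::sync::Arc;

// Stand-in for the query-handler trait the frontend delegates to.
trait SqlHandler: Send + Sync {
    fn do_query(&self, query: &str) -> Result<String, String>;
}

// In standalone mode the "datanode" is just an in-process implementation.
struct LocalDatanode;

impl SqlHandler for LocalDatanode {
    fn do_query(&self, query: &str) -> Result<String, String> {
        Ok(format!("executed locally: {query}"))
    }
}

struct Frontend {
    sql_handler: Arc<dyn SqlHandler>,
}

impl Frontend {
    fn new_standalone(datanode: Arc<dyn SqlHandler>) -> Self {
        Self { sql_handler: datanode }
    }

    fn handle_sql(&self, query: &str) -> Result<String, String> {
        // Forward directly to the in-process handler; no gRPC round trip.
        self.sql_handler.do_query(query)
    }
}

fn main() {
    let frontend = Frontend::new_standalone(Arc::new(LocalDatanode));
    println!("{:?}", frontend.handle_sql("SELECT 1"));
}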
type: refactor
masked_commit_message: directly invoke Datanode methods in standalone mode (part 1) (#694)

hash: 5f87b1f71485e747492ba2374a903494f5e3ad43
date: 2023-11-20 22:21:42
author: zyy17
commit_message: ci: add ubuntu:18.10 dev-builder for using old version glibc(>=2.28) (#2779)
is_merge: false
git_diff:
diff --git a/docker/dev-builder/ubuntu/Dockerfile-18.10 b/docker/dev-builder/ubuntu/Dockerfile-18.10
new file mode 100644
index 000000000000..a9ba1a9f567e
--- /dev/null
+++ b/docker/dev-builder/ubuntu/Dockerfile-18.10
@@ -0,0 +1,47 @@
+# Use the legacy glibc 2.28.
+FROM ubuntu:18.10
+
+ENV LANG en_US.utf8
+WORKDIR /greptimedb
+
+# Use old-releases.ubuntu.com to avoid 404s: https://help.ubuntu.com/community/EOLUpgrades.
+RUN echo "deb http://old-releases.ubuntu.com/ubuntu/ cosmic main restricted universe multiverse\n\
+deb http://old-releases.ubuntu.com/ubuntu/ cosmic-updates main restricted universe multiverse\n\
+deb http://old-releases.ubuntu.com/ubuntu/ cosmic-security main restricted universe multiverse" > /etc/apt/sources.list
+
+# Install dependencies.
+RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
+ libssl-dev \
+ tzdata \
+ curl \
+ ca-certificates \
+ git \
+ build-essential \
+ unzip \
+ pkg-config
+
+# Install protoc.
+ENV PROTOC_VERSION=25.1
+RUN if [ "$(uname -m)" = "x86_64" ]; then \
+ PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \
+ elif [ "$(uname -m)" = "aarch64" ]; then \
+ PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-aarch_64.zip; \
+ else \
+ echo "Unsupported architecture"; exit 1; \
+ fi && \
+ curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/${PROTOC_ZIP} && \
+ unzip -o ${PROTOC_ZIP} -d /usr/local bin/protoc && \
+ unzip -o ${PROTOC_ZIP} -d /usr/local 'include/*' && \
+ rm -f ${PROTOC_ZIP}
+
+# Install Rust.
+SHELL ["/bin/bash", "-c"]
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
+ENV PATH /root/.cargo/bin/:$PATH
+
+# Install Rust toolchains.
+ARG RUST_TOOLCHAIN
+RUN rustup toolchain install ${RUST_TOOLCHAIN}
+
+# Install nextest.
+RUN cargo install cargo-nextest --locked
type: ci
masked_commit_message: add ubuntu:18.10 dev-builder for using old version glibc(>=2.28) (#2779)

hash: 7c5ead90ac3ab6cb5196d5faa53d5883d31840d5
date: 2025-01-24 19:12:27
author: Ruihang Xia
commit_message: feat: mirror insert request to flownode in async (#5444)
is_merge: false
git_diff:
diff --git a/src/operator/src/insert.rs b/src/operator/src/insert.rs
index 47525d13cad1..88ab366bcab1 100644
--- a/src/operator/src/insert.rs
+++ b/src/operator/src/insert.rs
@@ -34,7 +34,7 @@ use common_meta::peer::Peer;
use common_query::prelude::{GREPTIME_TIMESTAMP, GREPTIME_VALUE};
use common_query::Output;
use common_telemetry::tracing_context::TracingContext;
-use common_telemetry::{error, info, warn};
+use common_telemetry::{error, info};
use futures_util::future;
use meter_macros::write_meter;
use partition::manager::PartitionRuleManagerRef;
@@ -338,50 +338,18 @@ impl Inserter {
instant_requests,
} = requests;
- // Mirror requests for source table to flownode
- match self
- .mirror_flow_node_requests(
- normal_requests
- .requests
- .iter()
- .chain(instant_requests.requests.iter()),
- )
- .await
- {
- Ok(flow_requests) => {
- let node_manager = self.node_manager.clone();
- let flow_tasks = flow_requests.into_iter().map(|(peer, inserts)| {
- let node_manager = node_manager.clone();
- common_runtime::spawn_global(async move {
- node_manager
- .flownode(&peer)
- .await
- .handle_inserts(inserts)
- .await
- .context(RequestInsertsSnafu)
- })
- });
-
- match future::try_join_all(flow_tasks)
- .await
- .context(JoinTaskSnafu)
- {
- Ok(ret) => {
- let affected_rows = ret
- .into_iter()
- .map(|resp| resp.map(|r| r.affected_rows))
- .sum::<Result<u64>>()
- .unwrap_or(0);
- crate::metrics::DIST_MIRROR_ROW_COUNT.inc_by(affected_rows);
- }
- Err(err) => {
- warn!(err; "Failed to insert data into flownode");
- }
- }
- }
- Err(err) => warn!(err; "Failed to mirror request to flownode"),
- }
+ // Mirror requests for source table to flownode asynchronously
+ let flow_mirror_task = FlowMirrorTask::new(
+ &self.table_flownode_set_cache,
+ normal_requests
+ .requests
+ .iter()
+ .chain(instant_requests.requests.iter()),
+ )
+ .await?;
+ flow_mirror_task.detach(self.node_manager.clone())?;
+ // Write requests to datanode and wait for response
let write_tasks = self
.group_requests_by_peer(normal_requests)
.await?
@@ -412,72 +380,6 @@ impl Inserter {
))
}
- /// Mirror requests for source table to flownode
- async fn mirror_flow_node_requests<'it, 'zelf: 'it>(
- &'zelf self,
- requests: impl Iterator<Item = &'it RegionInsertRequest>,
- ) -> Result<HashMap<Peer, RegionInsertRequests>> {
- // store partial source table requests used by flow node(only store what's used)
- let mut src_table_reqs: HashMap<TableId, Option<(Vec<Peer>, RegionInsertRequests)>> =
- HashMap::new();
- for req in requests {
- let table_id = RegionId::from_u64(req.region_id).table_id();
- match src_table_reqs.get_mut(&table_id) {
- Some(Some((_peers, reqs))) => reqs.requests.push(req.clone()),
- // already know this is not source table
- Some(None) => continue,
- _ => {
- let peers = self
- .table_flownode_set_cache
- .get(table_id)
- .await
- .context(RequestInsertsSnafu)?
- .unwrap_or_default()
- .values()
- .cloned()
- .collect::<Vec<_>>();
-
- if !peers.is_empty() {
- let mut reqs = RegionInsertRequests::default();
- reqs.requests.push(req.clone());
- src_table_reqs.insert(table_id, Some((peers, reqs)));
- } else {
- // insert a empty entry to avoid repeat query
- src_table_reqs.insert(table_id, None);
- }
- }
- }
- }
-
- let mut inserts: HashMap<Peer, RegionInsertRequests> = HashMap::new();
-
- for (_table_id, (peers, reqs)) in src_table_reqs
- .into_iter()
- .filter_map(|(k, v)| v.map(|v| (k, v)))
- {
- if peers.len() == 1 {
- // fast path, zero copy
- inserts
- .entry(peers[0].clone())
- .or_default()
- .requests
- .extend(reqs.requests);
- continue;
- } else {
- // TODO(discord9): need to split requests to multiple flownodes
- for flownode in peers {
- inserts
- .entry(flownode.clone())
- .or_default()
- .requests
- .extend(reqs.requests.clone());
- }
- }
- }
-
- Ok(inserts)
- }
-
async fn group_requests_by_peer(
&self,
requests: RegionInsertRequests,
@@ -915,3 +817,111 @@ struct CreateAlterTableResult {
/// Table Info of the created tables.
table_infos: HashMap<TableId, Arc<TableInfo>>,
}
+
+struct FlowMirrorTask {
+ requests: HashMap<Peer, RegionInsertRequests>,
+ num_rows: usize,
+}
+
+impl FlowMirrorTask {
+ async fn new(
+ cache: &TableFlownodeSetCacheRef,
+ requests: impl Iterator<Item = &RegionInsertRequest>,
+ ) -> Result<Self> {
+ let mut src_table_reqs: HashMap<TableId, Option<(Vec<Peer>, RegionInsertRequests)>> =
+ HashMap::new();
+ let mut num_rows = 0;
+
+ for req in requests {
+ let table_id = RegionId::from_u64(req.region_id).table_id();
+ match src_table_reqs.get_mut(&table_id) {
+ Some(Some((_peers, reqs))) => reqs.requests.push(req.clone()),
+                // already know this is not a source table
+ Some(None) => continue,
+ _ => {
+ let peers = cache
+ .get(table_id)
+ .await
+ .context(RequestInsertsSnafu)?
+ .unwrap_or_default()
+ .values()
+ .cloned()
+ .collect::<Vec<_>>();
+
+ if !peers.is_empty() {
+ let mut reqs = RegionInsertRequests::default();
+ reqs.requests.push(req.clone());
+ num_rows += reqs
+ .requests
+ .iter()
+ .map(|r| r.rows.as_ref().unwrap().rows.len())
+ .sum::<usize>();
+ src_table_reqs.insert(table_id, Some((peers, reqs)));
+ } else {
+                        // insert an empty entry to avoid repeated queries
+ src_table_reqs.insert(table_id, None);
+ }
+ }
+ }
+ }
+
+ let mut inserts: HashMap<Peer, RegionInsertRequests> = HashMap::new();
+
+ for (_table_id, (peers, reqs)) in src_table_reqs
+ .into_iter()
+ .filter_map(|(k, v)| v.map(|v| (k, v)))
+ {
+ if peers.len() == 1 {
+ // fast path, zero copy
+ inserts
+ .entry(peers[0].clone())
+ .or_default()
+ .requests
+ .extend(reqs.requests);
+ continue;
+ } else {
+ // TODO(discord9): need to split requests to multiple flownodes
+ for flownode in peers {
+ inserts
+ .entry(flownode.clone())
+ .or_default()
+ .requests
+ .extend(reqs.requests.clone());
+ }
+ }
+ }
+
+ Ok(Self {
+ requests: inserts,
+ num_rows,
+ })
+ }
+
+ fn detach(self, node_manager: NodeManagerRef) -> Result<()> {
+ crate::metrics::DIST_MIRROR_PENDING_ROW_COUNT.add(self.num_rows as i64);
+ for (peer, inserts) in self.requests {
+ let node_manager = node_manager.clone();
+ common_runtime::spawn_global(async move {
+ let result = node_manager
+ .flownode(&peer)
+ .await
+ .handle_inserts(inserts)
+ .await
+ .context(RequestInsertsSnafu);
+
+ match result {
+ Ok(resp) => {
+ let affected_rows = resp.affected_rows;
+ crate::metrics::DIST_MIRROR_ROW_COUNT.inc_by(affected_rows);
+ crate::metrics::DIST_MIRROR_PENDING_ROW_COUNT.sub(affected_rows as _);
+ }
+ Err(err) => {
+ error!(err; "Failed to insert data into flownode {}", peer);
+ }
+ }
+ });
+ }
+
+ Ok(())
+ }
+}
diff --git a/src/operator/src/metrics.rs b/src/operator/src/metrics.rs
index 9a77f9844d38..e6a4827e48cd 100644
--- a/src/operator/src/metrics.rs
+++ b/src/operator/src/metrics.rs
@@ -41,6 +41,11 @@ lazy_static! {
"table operator mirror rows"
)
.unwrap();
+ pub static ref DIST_MIRROR_PENDING_ROW_COUNT: IntGauge = register_int_gauge!(
+ "greptime_table_operator_mirror_pending_rows",
+ "table operator mirror pending rows"
+ )
+ .unwrap();
pub static ref DIST_DELETE_ROW_COUNT: IntCounter = register_int_counter!(
"greptime_table_operator_delete_rows",
"table operator delete rows"
type: feat
masked_commit_message: mirror insert request to flownode in async (#5444)

hash: fd3f23ea15c646ab21da3f172565dce309150f99
date: 2024-01-10 16:21:30
author: dennis zhuang
commit_message: feat: adds runtime_metrics (#3127)
is_merge: false
git_diff:
diff --git a/Cargo.lock b/Cargo.lock
index 0a83a7d08613..3b93683d1269 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1206,6 +1206,7 @@ dependencies = [
"datatypes",
"futures",
"futures-util",
+ "itertools 0.10.5",
"lazy_static",
"log-store",
"meta-client",
diff --git a/src/catalog/Cargo.toml b/src/catalog/Cargo.toml
index 715324e7a2db..656cc9fd1ee2 100644
--- a/src/catalog/Cargo.toml
+++ b/src/catalog/Cargo.toml
@@ -30,6 +30,7 @@ datafusion.workspace = true
datatypes.workspace = true
futures = "0.3"
futures-util.workspace = true
+itertools.workspace = true
lazy_static.workspace = true
meta-client.workspace = true
moka = { workspace = true, features = ["future"] }
diff --git a/src/catalog/src/information_schema.rs b/src/catalog/src/information_schema.rs
index 84be9576f610..c39de7c4881a 100644
--- a/src/catalog/src/information_schema.rs
+++ b/src/catalog/src/information_schema.rs
@@ -16,6 +16,7 @@ mod columns;
mod key_column_usage;
mod memory_table;
mod predicate;
+mod runtime_metrics;
mod schemata;
mod table_names;
mod tables;
@@ -23,7 +24,7 @@ mod tables;
use std::collections::HashMap;
use std::sync::{Arc, Weak};
-use common_catalog::consts::{self, INFORMATION_SCHEMA_NAME};
+use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME};
use common_error::ext::BoxedError;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use datatypes::schema::SchemaRef;
@@ -46,6 +47,7 @@ use self::columns::InformationSchemaColumns;
use crate::error::Result;
use crate::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
+use crate::information_schema::runtime_metrics::InformationSchemaMetrics;
use crate::information_schema::schemata::InformationSchemaSchemata;
use crate::information_schema::tables::InformationSchemaTables;
use crate::CatalogManager;
@@ -56,7 +58,6 @@ lazy_static! {
ENGINES,
COLUMN_PRIVILEGES,
COLUMN_STATISTICS,
- BUILD_INFO,
CHARACTER_SETS,
COLLATIONS,
COLLATION_CHARACTER_SET_APPLICABILITY,
@@ -142,6 +143,21 @@ impl InformationSchemaProvider {
fn build_tables(&mut self) {
let mut tables = HashMap::new();
+
+ // Carefully consider the tables that may expose sensitive cluster configurations,
+ // authentication details, and other critical information.
+        // Only expose these tables under the `greptime` catalog to prevent information leaks.
+ if self.catalog_name == DEFAULT_CATALOG_NAME {
+ tables.insert(
+ RUNTIME_METRICS.to_string(),
+ self.build_table(RUNTIME_METRICS).unwrap(),
+ );
+ tables.insert(
+ BUILD_INFO.to_string(),
+ self.build_table(BUILD_INFO).unwrap(),
+ );
+ }
+
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
tables.insert(SCHEMATA.to_string(), self.build_table(SCHEMATA).unwrap());
tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
@@ -209,6 +225,7 @@ impl InformationSchemaProvider {
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _),
+ RUNTIME_METRICS => Some(Arc::new(InformationSchemaMetrics::new())),
_ => None,
}
}
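
The catalog gating in `build_tables` is easy to misread in diff form, so here is a small sketch of the intended behavior, with plain strings standing in for real table providers (catalog names other than `greptime` are illustrative): sensitive virtual tables are registered only when the provider is built for the default catalog.

```rust
use std::collections::HashMap;

const DEFAULT_CATALOG_NAME: &str = "greptime";

fn build_tables(catalog_name: &str) -> HashMap<String, String> {
    let mut tables = HashMap::new();
    // Tables that may expose runtime or build details stay out of user catalogs.
    if catalog_name == DEFAULT_CATALOG_NAME {
        tables.insert("runtime_metrics".to_string(), "sensitive".to_string());
        tables.insert("build_info".to_string(), "sensitive".to_string());
    }
    // The ordinary information_schema tables are always available.
    tables.insert("tables".to_string(), "public".to_string());
    tables.insert("columns".to_string(), "public".to_string());
    tables
}

fn main() {
    assert!(build_tables("greptime").contains_key("runtime_metrics"));
    assert!(!build_tables("my_catalog").contains_key("runtime_metrics"));
}
```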
diff --git a/src/catalog/src/information_schema/runtime_metrics.rs b/src/catalog/src/information_schema/runtime_metrics.rs
new file mode 100644
index 000000000000..52233db39111
--- /dev/null
+++ b/src/catalog/src/information_schema/runtime_metrics.rs
@@ -0,0 +1,250 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use arrow_schema::SchemaRef as ArrowSchemaRef;
+use common_catalog::consts::INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID;
+use common_error::ext::BoxedError;
+use common_query::physical_plan::TaskContext;
+use common_recordbatch::adapter::RecordBatchStreamAdapter;
+use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use common_time::util::current_time_millis;
+use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
+use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
+use datatypes::prelude::{ConcreteDataType, MutableVector};
+use datatypes::scalars::ScalarVectorBuilder;
+use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
+use datatypes::vectors::{
+ ConstantVector, Float64VectorBuilder, StringVector, StringVectorBuilder,
+ TimestampMillisecondVector, VectorRef,
+};
+use itertools::Itertools;
+use snafu::ResultExt;
+use store_api::storage::{ScanRequest, TableId};
+
+use super::{InformationTable, RUNTIME_METRICS};
+use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
+
+pub(super) struct InformationSchemaMetrics {
+ schema: SchemaRef,
+}
+
+const METRIC_NAME: &str = "metric_name";
+const METRIC_VALUE: &str = "value";
+const METRIC_LABELS: &str = "labels";
+const NODE: &str = "node";
+const NODE_TYPE: &str = "node_type";
+const TIMESTAMP: &str = "timestamp";
+
+/// The `information_schema.runtime_metrics` virtual table.
+/// It exposes GreptimeDB's runtime metrics to users via SQL.
+impl InformationSchemaMetrics {
+ pub(super) fn new() -> Self {
+ Self {
+ schema: Self::schema(),
+ }
+ }
+
+ fn schema() -> SchemaRef {
+ Arc::new(Schema::new(vec![
+ ColumnSchema::new(METRIC_NAME, ConcreteDataType::string_datatype(), false),
+ ColumnSchema::new(METRIC_VALUE, ConcreteDataType::float64_datatype(), false),
+ ColumnSchema::new(METRIC_LABELS, ConcreteDataType::string_datatype(), true),
+ ColumnSchema::new(NODE, ConcreteDataType::string_datatype(), false),
+ ColumnSchema::new(NODE_TYPE, ConcreteDataType::string_datatype(), false),
+ ColumnSchema::new(
+ TIMESTAMP,
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ ),
+ ]))
+ }
+
+ fn builder(&self) -> InformationSchemaMetricsBuilder {
+ InformationSchemaMetricsBuilder::new(self.schema.clone())
+ }
+}
+
+impl InformationTable for InformationSchemaMetrics {
+ fn table_id(&self) -> TableId {
+ INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID
+ }
+
+ fn table_name(&self) -> &'static str {
+ RUNTIME_METRICS
+ }
+
+ fn schema(&self) -> SchemaRef {
+ self.schema.clone()
+ }
+
+ fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
+ let schema = self.schema.arrow_schema().clone();
+ let mut builder = self.builder();
+ let stream = Box::pin(DfRecordBatchStreamAdapter::new(
+ schema,
+ futures::stream::once(async move {
+ builder
+ .make_metrics(Some(request))
+ .await
+ .map(|x| x.into_df_record_batch())
+ .map_err(Into::into)
+ }),
+ ));
+ Ok(Box::pin(
+ RecordBatchStreamAdapter::try_new(stream)
+ .map_err(BoxedError::new)
+ .context(InternalSnafu)?,
+ ))
+ }
+}
+
+struct InformationSchemaMetricsBuilder {
+ schema: SchemaRef,
+
+ metric_names: StringVectorBuilder,
+ metric_values: Float64VectorBuilder,
+ metric_labels: StringVectorBuilder,
+}
+
+impl InformationSchemaMetricsBuilder {
+ fn new(schema: SchemaRef) -> Self {
+ Self {
+ schema,
+ metric_names: StringVectorBuilder::with_capacity(42),
+ metric_values: Float64VectorBuilder::with_capacity(42),
+ metric_labels: StringVectorBuilder::with_capacity(42),
+ }
+ }
+
+ fn add_metric(&mut self, metric_name: &str, labels: String, metric_value: f64) {
+ self.metric_names.push(Some(metric_name));
+ self.metric_values.push(Some(metric_value));
+ self.metric_labels.push(Some(&labels));
+ }
+
+ async fn make_metrics(&mut self, _request: Option<ScanRequest>) -> Result<RecordBatch> {
+ let metric_families = prometheus::gather();
+
+ let write_request =
+ common_telemetry::metric::convert_metric_to_write_request(metric_families, None, 0);
+
+ for ts in write_request.timeseries {
+            // Safety: the `__name__` label is always present
+ let metric_name = ts
+ .labels
+ .iter()
+ .find_map(|label| {
+ if label.name == "__name__" {
+ Some(label.value.clone())
+ } else {
+ None
+ }
+ })
+ .unwrap();
+
+ self.add_metric(
+ &metric_name,
+ ts.labels
+ .into_iter()
+ .filter_map(|label| {
+ if label.name == "__name__" {
+ None
+ } else {
+ Some(format!("{}={}", label.name, label.value))
+ }
+ })
+ .join(", "),
+ // Safety: always has a sample
+ ts.samples[0].value,
+ );
+ }
+
+ self.finish()
+ }
+
+ fn finish(&mut self) -> Result<RecordBatch> {
+ let rows_num = self.metric_names.len();
+ let unknowns = Arc::new(ConstantVector::new(
+ Arc::new(StringVector::from(vec!["unknown"])),
+ rows_num,
+ ));
+ let timestamps = Arc::new(ConstantVector::new(
+ Arc::new(TimestampMillisecondVector::from_slice([
+ current_time_millis(),
+ ])),
+ rows_num,
+ ));
+
+ let columns: Vec<VectorRef> = vec![
+ Arc::new(self.metric_names.finish()),
+ Arc::new(self.metric_values.finish()),
+ Arc::new(self.metric_labels.finish()),
+ // TODO(dennis): supports node and node_type for cluster
+ unknowns.clone(),
+ unknowns,
+ timestamps,
+ ];
+
+ RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
+ }
+}
+
+impl DfPartitionStream for InformationSchemaMetrics {
+ fn schema(&self) -> &ArrowSchemaRef {
+ self.schema.arrow_schema()
+ }
+
+ fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
+ let schema = self.schema.arrow_schema().clone();
+ let mut builder = self.builder();
+ Box::pin(DfRecordBatchStreamAdapter::new(
+ schema,
+ futures::stream::once(async move {
+ builder
+ .make_metrics(None)
+ .await
+ .map(|x| x.into_df_record_batch())
+ .map_err(Into::into)
+ }),
+ ))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use common_recordbatch::RecordBatches;
+
+ use super::*;
+
+ #[tokio::test]
+ async fn test_make_metrics() {
+ let metrics = InformationSchemaMetrics::new();
+
+ let stream = metrics.to_stream(ScanRequest::default()).unwrap();
+
+ let batches = RecordBatches::try_collect(stream).await.unwrap();
+
+ let result_literal = batches.pretty_print().unwrap();
+
+ assert!(result_literal.contains(METRIC_NAME));
+ assert!(result_literal.contains(METRIC_VALUE));
+ assert!(result_literal.contains(METRIC_LABELS));
+ assert!(result_literal.contains(NODE));
+ assert!(result_literal.contains(NODE_TYPE));
+ assert!(result_literal.contains(TIMESTAMP));
+ }
+}
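
The rows served by `make_metrics` ultimately come from `prometheus::gather()`. The standalone sketch below flattens the gathered families into one `(name, labels, value)` row per label set, using the `prometheus` crate's public proto getters directly instead of GreptimeDB's `convert_metric_to_write_request` helper; treat the metric names and the feature assumptions (default, protobuf-backed proto types) as illustrative.

```rust
use prometheus::{register_int_counter, register_int_gauge};

fn main() -> prometheus::Result<()> {
    // Register a couple of sample metrics in the default registry.
    let requests = register_int_counter!("demo_requests_total", "demo counter")?;
    let in_flight = register_int_gauge!("demo_in_flight", "demo gauge")?;
    requests.inc_by(3);
    in_flight.set(1);

    // Flatten each metric family into one row per label set, as the virtual table does.
    for family in prometheus::gather() {
        for metric in family.get_metric() {
            let labels = metric
                .get_label()
                .iter()
                .map(|l| format!("{}={}", l.get_name(), l.get_value()))
                .collect::<Vec<_>>()
                .join(", ");
            // Counters and gauges carry their value in different proto fields.
            let value = if metric.has_counter() {
                metric.get_counter().get_value()
            } else if metric.has_gauge() {
                metric.get_gauge().get_value()
            } else {
                continue; // histograms/summaries are skipped in this sketch
            };
            println!("{} [{}] = {}", family.get_name(), labels, value);
        }
    }
    Ok(())
}
```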
diff --git a/src/catalog/src/information_schema/table_names.rs b/src/catalog/src/information_schema/table_names.rs
index ce252e2e47dd..e47b96d146ec 100644
--- a/src/catalog/src/information_schema/table_names.rs
+++ b/src/catalog/src/information_schema/table_names.rs
@@ -38,3 +38,4 @@ pub const TABLE_PRIVILEGES: &str = "table_privileges";
pub const TRIGGERS: &str = "triggers";
pub const GLOBAL_STATUS: &str = "global_status";
pub const SESSION_STATUS: &str = "session_status";
+pub const RUNTIME_METRICS: &str = "runtime_metrics";
diff --git a/src/common/catalog/src/consts.rs b/src/common/catalog/src/consts.rs
index 3e0510cd9215..2fd9aa92a89f 100644
--- a/src/common/catalog/src/consts.rs
+++ b/src/common/catalog/src/consts.rs
@@ -80,6 +80,8 @@ pub const INFORMATION_SCHEMA_TRIGGERS_TABLE_ID: u32 = 24;
pub const INFORMATION_SCHEMA_GLOBAL_STATUS_TABLE_ID: u32 = 25;
/// id for information_schema.SESSION_STATUS
pub const INFORMATION_SCHEMA_SESSION_STATUS_TABLE_ID: u32 = 26;
+/// id for information_schema.RUNTIME_METRICS
+pub const INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID: u32 = 27;
/// ----- End of information_schema tables -----
pub const MITO_ENGINE: &str = "mito";
diff --git a/tests/cases/standalone/common/show/show_databases_tables.result b/tests/cases/standalone/common/show/show_databases_tables.result
index 0f5615222726..c61743d193a3 100644
--- a/tests/cases/standalone/common/show/show_databases_tables.result
+++ b/tests/cases/standalone/common/show/show_databases_tables.result
@@ -20,7 +20,6 @@ show tables;
+---------------------------------------+
| Tables |
+---------------------------------------+
-| build_info |
| character_sets |
| check_constraints |
| collation_character_set_applicability |
diff --git a/tests/cases/standalone/common/system/information_schema.result b/tests/cases/standalone/common/system/information_schema.result
index 75a692b51f1e..8e9486f393df 100644
--- a/tests/cases/standalone/common/system/information_schema.result
+++ b/tests/cases/standalone/common/system/information_schema.result
@@ -12,7 +12,6 @@ order by table_schema, table_name;
+---------------+--------------------+---------------------------------------+-----------------+----------+-------------+
| table_catalog | table_schema | table_name | table_type | table_id | engine |
+---------------+--------------------+---------------------------------------+-----------------+----------+-------------+
-| greptime | information_schema | build_info | LOCAL TEMPORARY | 8 | |
| greptime | information_schema | character_sets | LOCAL TEMPORARY | 9 | |
| greptime | information_schema | check_constraints | LOCAL TEMPORARY | 12 | |
| greptime | information_schema | collation_character_set_applicability | LOCAL TEMPORARY | 11 | |
@@ -39,259 +38,254 @@ order by table_schema, table_name;
| greptime | public | numbers | LOCAL TEMPORARY | 2 | test_engine |
+---------------+--------------------+---------------------------------------+-----------------+----------+-------------+
-select * from information_schema.columns order by table_schema, table_name;
+select * from information_schema.columns order by table_schema, table_name, column_name;
+---------------+--------------------+---------------------------------------+-----------------------------------+-----------+---------------+----------------+-------------+-------------+----------------+
| table_catalog | table_schema | table_name | column_name | data_type | semantic_type | column_default | is_nullable | column_type | column_comment |
+---------------+--------------------+---------------------------------------+-----------------------------------+-----------+---------------+----------------+-------------+-------------+----------------+
-| greptime | information_schema | build_info | git_branch | String | FIELD | | No | String | |
-| greptime | information_schema | build_info | git_commit | String | FIELD | | No | String | |
-| greptime | information_schema | build_info | git_commit_short | String | FIELD | | No | String | |
-| greptime | information_schema | build_info | git_dirty | String | FIELD | | No | String | |
-| greptime | information_schema | build_info | pkg_version | String | FIELD | | No | String | |
-| greptime | information_schema | character_sets | maxlen | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | character_sets | character_set_name | String | FIELD | | No | String | |
| greptime | information_schema | character_sets | default_collate_name | String | FIELD | | No | String | |
| greptime | information_schema | character_sets | description | String | FIELD | | No | String | |
+| greptime | information_schema | character_sets | maxlen | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | check_constraints | check_clause | String | FIELD | | No | String | |
| greptime | information_schema | check_constraints | constraint_catalog | String | FIELD | | No | String | |
-| greptime | information_schema | check_constraints | constraint_schema | String | FIELD | | No | String | |
| greptime | information_schema | check_constraints | constraint_name | String | FIELD | | No | String | |
+| greptime | information_schema | check_constraints | constraint_schema | String | FIELD | | No | String | |
| greptime | information_schema | collation_character_set_applicability | character_set_name | String | FIELD | | No | String | |
| greptime | information_schema | collation_character_set_applicability | collation_name | String | FIELD | | No | String | |
| greptime | information_schema | collations | character_set_name | String | FIELD | | No | String | |
| greptime | information_schema | collations | collation_name | String | FIELD | | No | String | |
-| greptime | information_schema | collations | sortlen | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | collations | id | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | collations | is_default | String | FIELD | | No | String | |
| greptime | information_schema | collations | is_compiled | String | FIELD | | No | String | |
-| greptime | information_schema | column_privileges | table_schema | String | FIELD | | No | String | |
-| greptime | information_schema | column_privileges | table_catalog | String | FIELD | | No | String | |
-| greptime | information_schema | column_privileges | grantee | String | FIELD | | No | String | |
-| greptime | information_schema | column_privileges | table_name | String | FIELD | | No | String | |
+| greptime | information_schema | collations | is_default | String | FIELD | | No | String | |
+| greptime | information_schema | collations | sortlen | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | column_privileges | column_name | String | FIELD | | No | String | |
-| greptime | information_schema | column_privileges | privilege_type | String | FIELD | | No | String | |
+| greptime | information_schema | column_privileges | grantee | String | FIELD | | No | String | |
| greptime | information_schema | column_privileges | is_grantable | String | FIELD | | No | String | |
+| greptime | information_schema | column_privileges | privilege_type | String | FIELD | | No | String | |
+| greptime | information_schema | column_privileges | table_catalog | String | FIELD | | No | String | |
+| greptime | information_schema | column_privileges | table_name | String | FIELD | | No | String | |
+| greptime | information_schema | column_privileges | table_schema | String | FIELD | | No | String | |
+| greptime | information_schema | column_statistics | column_name | String | FIELD | | No | String | |
| greptime | information_schema | column_statistics | histogram | String | FIELD | | No | String | |
| greptime | information_schema | column_statistics | schema_name | String | FIELD | | No | String | |
| greptime | information_schema | column_statistics | table_name | String | FIELD | | No | String | |
-| greptime | information_schema | column_statistics | column_name | String | FIELD | | No | String | |
-| greptime | information_schema | columns | column_type | String | FIELD | | No | String | |
| greptime | information_schema | columns | column_comment | String | FIELD | | Yes | String | |
-| greptime | information_schema | columns | table_name | String | FIELD | | No | String | |
-| greptime | information_schema | columns | is_nullable | String | FIELD | | No | String | |
| greptime | information_schema | columns | column_default | String | FIELD | | Yes | String | |
+| greptime | information_schema | columns | column_name | String | FIELD | | No | String | |
+| greptime | information_schema | columns | column_type | String | FIELD | | No | String | |
+| greptime | information_schema | columns | data_type | String | FIELD | | No | String | |
+| greptime | information_schema | columns | is_nullable | String | FIELD | | No | String | |
| greptime | information_schema | columns | semantic_type | String | FIELD | | No | String | |
| greptime | information_schema | columns | table_catalog | String | FIELD | | No | String | |
+| greptime | information_schema | columns | table_name | String | FIELD | | No | String | |
| greptime | information_schema | columns | table_schema | String | FIELD | | No | String | |
-| greptime | information_schema | columns | data_type | String | FIELD | | No | String | |
-| greptime | information_schema | columns | column_name | String | FIELD | | No | String | |
-| greptime | information_schema | engines | savepoints | String | FIELD | | No | String | |
-| greptime | information_schema | engines | xa | String | FIELD | | No | String | |
-| greptime | information_schema | engines | transactions | String | FIELD | | No | String | |
| greptime | information_schema | engines | comment | String | FIELD | | No | String | |
-| greptime | information_schema | engines | support | String | FIELD | | No | String | |
| greptime | information_schema | engines | engine | String | FIELD | | No | String | |
-| greptime | information_schema | events | event_name | String | FIELD | | No | String | |
-| greptime | information_schema | events | last_executed | DateTime | FIELD | | No | DateTime | |
-| greptime | information_schema | events | database_collation | String | FIELD | | No | String | |
-| greptime | information_schema | events | collation_connection | String | FIELD | | No | String | |
+| greptime | information_schema | engines | savepoints | String | FIELD | | No | String | |
+| greptime | information_schema | engines | support | String | FIELD | | No | String | |
+| greptime | information_schema | engines | transactions | String | FIELD | | No | String | |
+| greptime | information_schema | engines | xa | String | FIELD | | No | String | |
| greptime | information_schema | events | character_set_client | String | FIELD | | No | String | |
-| greptime | information_schema | events | originator | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | events | event_catalog | String | FIELD | | No | String | |
-| greptime | information_schema | events | event_schema | String | FIELD | | No | String | |
-| greptime | information_schema | events | event_comment | String | FIELD | | No | String | |
+| greptime | information_schema | events | collation_connection | String | FIELD | | No | String | |
+| greptime | information_schema | events | created | DateTime | FIELD | | No | DateTime | |
+| greptime | information_schema | events | database_collation | String | FIELD | | No | String | |
| greptime | information_schema | events | definer | String | FIELD | | No | String | |
-| greptime | information_schema | events | time_zone | String | FIELD | | No | String | |
+| greptime | information_schema | events | ends | DateTime | FIELD | | No | DateTime | |
| greptime | information_schema | events | event_body | String | FIELD | | No | String | |
+| greptime | information_schema | events | event_catalog | String | FIELD | | No | String | |
+| greptime | information_schema | events | event_comment | String | FIELD | | No | String | |
| greptime | information_schema | events | event_definition | String | FIELD | | No | String | |
+| greptime | information_schema | events | event_name | String | FIELD | | No | String | |
+| greptime | information_schema | events | event_schema | String | FIELD | | No | String | |
| greptime | information_schema | events | event_type | String | FIELD | | No | String | |
| greptime | information_schema | events | execute_at | DateTime | FIELD | | No | DateTime | |
-| greptime | information_schema | events | interval_value | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | events | interval_field | String | FIELD | | No | String | |
+| greptime | information_schema | events | interval_value | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | events | last_altered | DateTime | FIELD | | No | DateTime | |
+| greptime | information_schema | events | last_executed | DateTime | FIELD | | No | DateTime | |
+| greptime | information_schema | events | on_completion | String | FIELD | | No | String | |
+| greptime | information_schema | events | originator | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | events | sql_mode | String | FIELD | | No | String | |
| greptime | information_schema | events | starts | DateTime | FIELD | | No | DateTime | |
-| greptime | information_schema | events | ends | DateTime | FIELD | | No | DateTime | |
| greptime | information_schema | events | status | String | FIELD | | No | String | |
-| greptime | information_schema | events | on_completion | String | FIELD | | No | String | |
-| greptime | information_schema | events | created | DateTime | FIELD | | No | DateTime | |
-| greptime | information_schema | events | last_altered | DateTime | FIELD | | No | DateTime | |
-| greptime | information_schema | files | file_name | String | FIELD | | No | String | |
-| greptime | information_schema | files | free_extents | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | events | time_zone | String | FIELD | | No | String | |
+| greptime | information_schema | files | autoextend_size | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | files | avg_row_length | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | files | check_time | DateTime | FIELD | | No | DateTime | |
| greptime | information_schema | files | checksum | String | FIELD | | No | String | |
-| greptime | information_schema | files | update_time | DateTime | FIELD | | No | DateTime | |
| greptime | information_schema | files | create_time | DateTime | FIELD | | No | DateTime | |
-| greptime | information_schema | files | status | String | FIELD | | No | String | |
+| greptime | information_schema | files | creation_time | DateTime | FIELD | | No | DateTime | |
+| greptime | information_schema | files | data_free | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | files | data_length | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | files | deleted_rows | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | files | engine | String | FIELD | | No | String | |
+| greptime | information_schema | files | extent_size | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | files | extra | String | FIELD | | No | String | |
| greptime | information_schema | files | file_id | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | files | check_time | DateTime | FIELD | | No | DateTime | |
+| greptime | information_schema | files | file_name | String | FIELD | | No | String | |
| greptime | information_schema | files | file_type | String | FIELD | | No | String | |
-| greptime | information_schema | files | tablespace_name | String | FIELD | | No | String | |
-| greptime | information_schema | files | table_catalog | String | FIELD | | No | String | |
-| greptime | information_schema | files | table_schema | String | FIELD | | No | String | |
-| greptime | information_schema | files | table_name | String | FIELD | | No | String | |
-| greptime | information_schema | files | logfile_group_name | String | FIELD | | No | String | |
-| greptime | information_schema | files | logfile_group_number | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | files | engine | String | FIELD | | No | String | |
+| greptime | information_schema | files | free_extents | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | files | fulltext_keys | String | FIELD | | No | String | |
-| greptime | information_schema | files | deleted_rows | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | files | update_count | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | files | extra | String | FIELD | | No | String | |
-| greptime | information_schema | files | total_extents | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | files | extent_size | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | files | index_length | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | files | initial_size | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | files | maximum_size | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | files | autoextend_size | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | files | creation_time | DateTime | FIELD | | No | DateTime | |
-| greptime | information_schema | files | last_update_time | DateTime | FIELD | | No | DateTime | |
| greptime | information_schema | files | last_access_time | DateTime | FIELD | | No | DateTime | |
+| greptime | information_schema | files | last_update_time | DateTime | FIELD | | No | DateTime | |
+| greptime | information_schema | files | logfile_group_name | String | FIELD | | No | String | |
+| greptime | information_schema | files | logfile_group_number | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | files | max_data_length | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | files | maximum_size | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | files | recover_time | DateTime | FIELD | | No | DateTime | |
-| greptime | information_schema | files | transaction_counter | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | files | version | String | FIELD | | No | String | |
| greptime | information_schema | files | row_format | String | FIELD | | No | String | |
+| greptime | information_schema | files | status | String | FIELD | | No | String | |
+| greptime | information_schema | files | table_catalog | String | FIELD | | No | String | |
+| greptime | information_schema | files | table_name | String | FIELD | | No | String | |
| greptime | information_schema | files | table_rows | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | files | avg_row_length | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | files | data_length | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | files | max_data_length | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | files | index_length | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | files | data_free | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | files | table_schema | String | FIELD | | No | String | |
+| greptime | information_schema | files | tablespace_name | String | FIELD | | No | String | |
+| greptime | information_schema | files | total_extents | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | files | transaction_counter | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | files | update_count | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | files | update_time | DateTime | FIELD | | No | DateTime | |
+| greptime | information_schema | files | version | String | FIELD | | No | String | |
| greptime | information_schema | global_status | variable_name | String | FIELD | | No | String | |
| greptime | information_schema | global_status | variable_value | String | FIELD | | No | String | |
-| greptime | information_schema | key_column_usage | table_catalog | String | FIELD | | No | String | |
+| greptime | information_schema | key_column_usage | column_name | String | FIELD | | No | String | |
+| greptime | information_schema | key_column_usage | constraint_catalog | String | FIELD | | No | String | |
+| greptime | information_schema | key_column_usage | constraint_name | String | FIELD | | No | String | |
+| greptime | information_schema | key_column_usage | constraint_schema | String | FIELD | | No | String | |
| greptime | information_schema | key_column_usage | ordinal_position | UInt32 | FIELD | | No | UInt32 | |
+| greptime | information_schema | key_column_usage | position_in_unique_constraint | UInt32 | FIELD | | Yes | UInt32 | |
| greptime | information_schema | key_column_usage | referenced_column_name | String | FIELD | | Yes | String | |
| greptime | information_schema | key_column_usage | referenced_table_name | String | FIELD | | Yes | String | |
| greptime | information_schema | key_column_usage | referenced_table_schema | String | FIELD | | Yes | String | |
-| greptime | information_schema | key_column_usage | position_in_unique_constraint | UInt32 | FIELD | | Yes | UInt32 | |
-| greptime | information_schema | key_column_usage | constraint_catalog | String | FIELD | | No | String | |
-| greptime | information_schema | key_column_usage | constraint_schema | String | FIELD | | No | String | |
-| greptime | information_schema | key_column_usage | constraint_name | String | FIELD | | No | String | |
-| greptime | information_schema | key_column_usage | table_schema | String | FIELD | | No | String | |
+| greptime | information_schema | key_column_usage | table_catalog | String | FIELD | | No | String | |
| greptime | information_schema | key_column_usage | table_name | String | FIELD | | No | String | |
-| greptime | information_schema | key_column_usage | column_name | String | FIELD | | No | String | |
+| greptime | information_schema | key_column_usage | table_schema | String | FIELD | | No | String | |
| greptime | information_schema | optimizer_trace | insufficient_privileges | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | optimizer_trace | missing_bytes_beyond_max_mem_size | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | optimizer_trace | trace | String | FIELD | | No | String | |
| greptime | information_schema | optimizer_trace | query | String | FIELD | | No | String | |
-| greptime | information_schema | parameters | specific_catalog | String | FIELD | | No | String | |
+| greptime | information_schema | optimizer_trace | trace | String | FIELD | | No | String | |
| greptime | information_schema | parameters | character_maximum_length | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | parameters | routine_type | String | FIELD | | No | String | |
-| greptime | information_schema | parameters | dtd_identifier | String | FIELD | | No | String | |
-| greptime | information_schema | parameters | collation_name | String | FIELD | | No | String | |
+| greptime | information_schema | parameters | character_octet_length | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | parameters | character_set_name | String | FIELD | | No | String | |
+| greptime | information_schema | parameters | collation_name | String | FIELD | | No | String | |
+| greptime | information_schema | parameters | data_type | String | FIELD | | No | String | |
| greptime | information_schema | parameters | datetime_precision | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | parameters | numeric_scale | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | parameters | dtd_identifier | String | FIELD | | No | String | |
| greptime | information_schema | parameters | numeric_precision | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | parameters | character_octet_length | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | parameters | specific_schema | String | FIELD | | No | String | |
-| greptime | information_schema | parameters | specific_name | String | FIELD | | No | String | |
+| greptime | information_schema | parameters | numeric_scale | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | parameters | ordinal_position | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | parameters | parameter_mode | String | FIELD | | No | String | |
| greptime | information_schema | parameters | parameter_name | String | FIELD | | No | String | |
-| greptime | information_schema | parameters | data_type | String | FIELD | | No | String | |
+| greptime | information_schema | parameters | routine_type | String | FIELD | | No | String | |
+| greptime | information_schema | parameters | specific_catalog | String | FIELD | | No | String | |
+| greptime | information_schema | parameters | specific_name | String | FIELD | | No | String | |
+| greptime | information_schema | parameters | specific_schema | String | FIELD | | No | String | |
+| greptime | information_schema | profiling | block_ops_in | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | profiling | block_ops_out | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | profiling | context_involuntary | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | profiling | context_voluntary | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | profiling | cpu_system | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | profiling | cpu_user | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | profiling | duration | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | profiling | messages_received | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | profiling | messages_sent | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | profiling | source_line | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | profiling | source_file | String | FIELD | | No | String | |
-| greptime | information_schema | profiling | source_function | String | FIELD | | No | String | |
-| greptime | information_schema | profiling | swaps | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | profiling | page_faults_minor | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | profiling | page_faults_major | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | profiling | messages_received | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | profiling | page_faults_minor | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | profiling | query_id | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | profiling | seq | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | profiling | source_file | String | FIELD | | No | String | |
+| greptime | information_schema | profiling | source_function | String | FIELD | | No | String | |
+| greptime | information_schema | profiling | source_line | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | profiling | state | String | FIELD | | No | String | |
-| greptime | information_schema | profiling | duration | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | profiling | cpu_user | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | profiling | cpu_system | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | profiling | context_voluntary | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | profiling | context_involuntary | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | profiling | block_ops_in | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | referential_constraints | referenced_table_name | String | FIELD | | No | String | |
+| greptime | information_schema | profiling | swaps | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | referential_constraints | constraint_catalog | String | FIELD | | No | String | |
| greptime | information_schema | referential_constraints | constraint_name | String | FIELD | | No | String | |
-| greptime | information_schema | referential_constraints | table_name | String | FIELD | | No | String | |
+| greptime | information_schema | referential_constraints | constraint_schema | String | FIELD | | No | String | |
| greptime | information_schema | referential_constraints | delete_rule | String | FIELD | | No | String | |
-| greptime | information_schema | referential_constraints | update_rule | String | FIELD | | No | String | |
| greptime | information_schema | referential_constraints | match_option | String | FIELD | | No | String | |
+| greptime | information_schema | referential_constraints | referenced_table_name | String | FIELD | | No | String | |
+| greptime | information_schema | referential_constraints | table_name | String | FIELD | | No | String | |
+| greptime | information_schema | referential_constraints | unique_constraint_catalog | String | FIELD | | No | String | |
| greptime | information_schema | referential_constraints | unique_constraint_name | String | FIELD | | No | String | |
| greptime | information_schema | referential_constraints | unique_constraint_schema | String | FIELD | | No | String | |
-| greptime | information_schema | referential_constraints | unique_constraint_catalog | String | FIELD | | No | String | |
-| greptime | information_schema | referential_constraints | constraint_catalog | String | FIELD | | No | String | |
-| greptime | information_schema | referential_constraints | constraint_schema | String | FIELD | | No | String | |
-| greptime | information_schema | routines | sql_mode | String | FIELD | | No | String | |
-| greptime | information_schema | routines | security_type | String | FIELD | | No | String | |
-| greptime | information_schema | routines | database_collation | String | FIELD | | No | String | |
-| greptime | information_schema | routines | data_type | String | FIELD | | No | String | |
-| greptime | information_schema | routines | character_set_client | String | FIELD | | No | String | |
-| greptime | information_schema | routines | definer | String | FIELD | | No | String | |
-| greptime | information_schema | routines | routine_comment | String | FIELD | | No | String | |
-| greptime | information_schema | routines | last_altered | DateTime | FIELD | | No | DateTime | |
-| greptime | information_schema | routines | created | DateTime | FIELD | | No | DateTime | |
-| greptime | information_schema | routines | specific_name | String | FIELD | | No | String | |
-| greptime | information_schema | routines | routine_catalog | String | FIELD | | No | String | |
-| greptime | information_schema | routines | routine_schema | String | FIELD | | No | String | |
-| greptime | information_schema | routines | routine_name | String | FIELD | | No | String | |
-| greptime | information_schema | routines | routine_type | String | FIELD | | No | String | |
-| greptime | information_schema | routines | collation_connection | String | FIELD | | No | String | |
+| greptime | information_schema | referential_constraints | update_rule | String | FIELD | | No | String | |
| greptime | information_schema | routines | character_maximum_length | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | routines | character_octet_length | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | routines | numeric_precision | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | routines | numeric_scale | Int64 | FIELD | | No | Int64 | |
-| greptime | information_schema | routines | datetime_precision | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | routines | character_set_client | String | FIELD | | No | String | |
| greptime | information_schema | routines | character_set_name | String | FIELD | | No | String | |
+| greptime | information_schema | routines | collation_connection | String | FIELD | | No | String | |
| greptime | information_schema | routines | collation_name | String | FIELD | | No | String | |
+| greptime | information_schema | routines | created | DateTime | FIELD | | No | DateTime | |
+| greptime | information_schema | routines | data_type | String | FIELD | | No | String | |
+| greptime | information_schema | routines | database_collation | String | FIELD | | No | String | |
+| greptime | information_schema | routines | datetime_precision | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | routines | definer | String | FIELD | | No | String | |
| greptime | information_schema | routines | dtd_identifier | String | FIELD | | No | String | |
-| greptime | information_schema | routines | routine_body | String | FIELD | | No | String | |
-| greptime | information_schema | routines | routine_definition | String | FIELD | | No | String | |
-| greptime | information_schema | routines | external_name | String | FIELD | | No | String | |
| greptime | information_schema | routines | external_language | String | FIELD | | No | String | |
-| greptime | information_schema | routines | parameter_style | String | FIELD | | No | String | |
+| greptime | information_schema | routines | external_name | String | FIELD | | No | String | |
| greptime | information_schema | routines | is_deterministic | String | FIELD | | No | String | |
+| greptime | information_schema | routines | last_altered | DateTime | FIELD | | No | DateTime | |
+| greptime | information_schema | routines | numeric_precision | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | routines | numeric_scale | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | routines | parameter_style | String | FIELD | | No | String | |
+| greptime | information_schema | routines | routine_body | String | FIELD | | No | String | |
+| greptime | information_schema | routines | routine_catalog | String | FIELD | | No | String | |
+| greptime | information_schema | routines | routine_comment | String | FIELD | | No | String | |
+| greptime | information_schema | routines | routine_definition | String | FIELD | | No | String | |
+| greptime | information_schema | routines | routine_name | String | FIELD | | No | String | |
+| greptime | information_schema | routines | routine_schema | String | FIELD | | No | String | |
+| greptime | information_schema | routines | routine_type | String | FIELD | | No | String | |
+| greptime | information_schema | routines | security_type | String | FIELD | | No | String | |
+| greptime | information_schema | routines | specific_name | String | FIELD | | No | String | |
| greptime | information_schema | routines | sql_data_access | String | FIELD | | No | String | |
+| greptime | information_schema | routines | sql_mode | String | FIELD | | No | String | |
| greptime | information_schema | routines | sql_path | String | FIELD | | No | String | |
-| greptime | information_schema | schema_privileges | is_grantable | String | FIELD | | No | String | |
| greptime | information_schema | schema_privileges | grantee | String | FIELD | | No | String | |
+| greptime | information_schema | schema_privileges | is_grantable | String | FIELD | | No | String | |
| greptime | information_schema | schema_privileges | privilege_type | String | FIELD | | No | String | |
-| greptime | information_schema | schema_privileges | table_schema | String | FIELD | | No | String | |
| greptime | information_schema | schema_privileges | table_catalog | String | FIELD | | No | String | |
-| greptime | information_schema | schemata | schema_name | String | FIELD | | No | String | |
+| greptime | information_schema | schema_privileges | table_schema | String | FIELD | | No | String | |
| greptime | information_schema | schemata | catalog_name | String | FIELD | | No | String | |
-| greptime | information_schema | schemata | sql_path | String | FIELD | | Yes | String | |
| greptime | information_schema | schemata | default_character_set_name | String | FIELD | | No | String | |
| greptime | information_schema | schemata | default_collation_name | String | FIELD | | No | String | |
-| greptime | information_schema | session_status | variable_value | String | FIELD | | No | String | |
+| greptime | information_schema | schemata | schema_name | String | FIELD | | No | String | |
+| greptime | information_schema | schemata | sql_path | String | FIELD | | Yes | String | |
| greptime | information_schema | session_status | variable_name | String | FIELD | | No | String | |
+| greptime | information_schema | session_status | variable_value | String | FIELD | | No | String | |
| greptime | information_schema | table_privileges | grantee | String | FIELD | | No | String | |
+| greptime | information_schema | table_privileges | is_grantable | String | FIELD | | No | String | |
+| greptime | information_schema | table_privileges | privilege_type | String | FIELD | | No | String | |
| greptime | information_schema | table_privileges | table_catalog | String | FIELD | | No | String | |
-| greptime | information_schema | table_privileges | table_schema | String | FIELD | | No | String | |
| greptime | information_schema | table_privileges | table_name | String | FIELD | | No | String | |
-| greptime | information_schema | table_privileges | privilege_type | String | FIELD | | No | String | |
-| greptime | information_schema | table_privileges | is_grantable | String | FIELD | | No | String | |
+| greptime | information_schema | table_privileges | table_schema | String | FIELD | | No | String | |
+| greptime | information_schema | tables | engine | String | FIELD | | Yes | String | |
| greptime | information_schema | tables | table_catalog | String | FIELD | | No | String | |
+| greptime | information_schema | tables | table_id | UInt32 | FIELD | | Yes | UInt32 | |
| greptime | information_schema | tables | table_name | String | FIELD | | No | String | |
-| greptime | information_schema | tables | table_type | String | FIELD | | No | String | |
-| greptime | information_schema | tables | engine | String | FIELD | | Yes | String | |
| greptime | information_schema | tables | table_schema | String | FIELD | | No | String | |
-| greptime | information_schema | tables | table_id | UInt32 | FIELD | | Yes | UInt32 | |
-| greptime | information_schema | triggers | event_manipulation | String | FIELD | | No | String | |
-| greptime | information_schema | triggers | event_object_schema | String | FIELD | | No | String | |
-| greptime | information_schema | triggers | event_object_table | String | FIELD | | No | String | |
-| greptime | information_schema | triggers | action_order | Int64 | FIELD | | No | Int64 | |
+| greptime | information_schema | tables | table_type | String | FIELD | | No | String | |
| greptime | information_schema | triggers | action_condition | String | FIELD | | No | String | |
-| greptime | information_schema | triggers | action_statement | String | FIELD | | No | String | |
+| greptime | information_schema | triggers | action_order | Int64 | FIELD | | No | Int64 | |
| greptime | information_schema | triggers | action_orientation | String | FIELD | | No | String | |
-| greptime | information_schema | triggers | action_timing | String | FIELD | | No | String | |
-| greptime | information_schema | triggers | action_reference_old_table | String | FIELD | | No | String | |
+| greptime | information_schema | triggers | action_reference_new_row | String | FIELD | | No | String | |
| greptime | information_schema | triggers | action_reference_new_table | String | FIELD | | No | String | |
| greptime | information_schema | triggers | action_reference_old_row | String | FIELD | | No | String | |
-| greptime | information_schema | triggers | action_reference_new_row | String | FIELD | | No | String | |
-| greptime | information_schema | triggers | created | DateTime | FIELD | | No | DateTime | |
-| greptime | information_schema | triggers | sql_mode | String | FIELD | | No | String | |
-| greptime | information_schema | triggers | definer | String | FIELD | | No | String | |
+| greptime | information_schema | triggers | action_reference_old_table | String | FIELD | | No | String | |
+| greptime | information_schema | triggers | action_statement | String | FIELD | | No | String | |
+| greptime | information_schema | triggers | action_timing | String | FIELD | | No | String | |
| greptime | information_schema | triggers | character_set_client | String | FIELD | | No | String | |
| greptime | information_schema | triggers | collation_connection | String | FIELD | | No | String | |
+| greptime | information_schema | triggers | created | DateTime | FIELD | | No | DateTime | |
| greptime | information_schema | triggers | database_collation | String | FIELD | | No | String | |
-| greptime | information_schema | triggers | trigger_catalog | String | FIELD | | No | String | |
+| greptime | information_schema | triggers | definer | String | FIELD | | No | String | |
+| greptime | information_schema | triggers | event_manipulation | String | FIELD | | No | String | |
| greptime | information_schema | triggers | event_object_catalog | String | FIELD | | No | String | |
+| greptime | information_schema | triggers | event_object_schema | String | FIELD | | No | String | |
+| greptime | information_schema | triggers | event_object_table | String | FIELD | | No | String | |
+| greptime | information_schema | triggers | sql_mode | String | FIELD | | No | String | |
+| greptime | information_schema | triggers | trigger_catalog | String | FIELD | | No | String | |
| greptime | information_schema | triggers | trigger_name | String | FIELD | | No | String | |
| greptime | information_schema | triggers | trigger_schema | String | FIELD | | No | String | |
| greptime | public | numbers | number | UInt32 | TAG | | No | UInt32 | |
@@ -615,6 +609,19 @@ select * from CHECK_CONSTRAINTS;
+--------------------+-------------------+-----------------+--------------+
+--------------------+-------------------+-----------------+--------------+
+desc table RUNTIME_METRICS;
+
++-------------+----------------------+-----+------+---------+---------------+
+| Column | Type | Key | Null | Default | Semantic Type |
++-------------+----------------------+-----+------+---------+---------------+
+| metric_name | String | | NO | | FIELD |
+| value | Float64 | | NO | | FIELD |
+| labels | String | | YES | | FIELD |
+| node | String | | NO | | FIELD |
+| node_type | String | | NO | | FIELD |
+| timestamp | TimestampMillisecond | | NO | | FIELD |
++-------------+----------------------+-----+------+---------+---------------+
+
use public;
Affected Rows: 0
diff --git a/tests/cases/standalone/common/system/information_schema.sql b/tests/cases/standalone/common/system/information_schema.sql
index 0ba3508aca47..c1109beab8aa 100644
--- a/tests/cases/standalone/common/system/information_schema.sql
+++ b/tests/cases/standalone/common/system/information_schema.sql
@@ -7,7 +7,7 @@ from information_schema.tables
where table_name != 'scripts'
order by table_schema, table_name;
-select * from information_schema.columns order by table_schema, table_name;
+select * from information_schema.columns order by table_schema, table_name, column_name;
create
database my_db;
@@ -115,4 +115,6 @@ desc table CHECK_CONSTRAINTS;
select * from CHECK_CONSTRAINTS;
+desc table RUNTIME_METRICS;
+
use public;
|
feat
|
adds runtime_metrics (#3127)
|
7c97fae5224372712e1214d15ecfc7d7197d8e77
|
2025-03-11 23:21:18
|
Lei, HUANG
|
chore: check region wal provider on startup to avoid inconsistence (#5687)
| false
|
diff --git a/src/mito2/Cargo.toml b/src/mito2/Cargo.toml
index 11f31502b191..2de3b3923f97 100644
--- a/src/mito2/Cargo.toml
+++ b/src/mito2/Cargo.toml
@@ -6,7 +6,7 @@ license.workspace = true
[features]
default = []
-test = ["common-test-util", "log-store", "rstest", "rstest_reuse", "rskafka"]
+test = ["common-test-util", "rstest", "rstest_reuse", "rskafka"]
[lints]
workspace = true
@@ -45,7 +45,7 @@ humantime-serde.workspace = true
index.workspace = true
itertools.workspace = true
lazy_static = "1.4"
-log-store = { workspace = true, optional = true }
+log-store = { workspace = true }
memcomparable = "0.2"
moka = { workspace = true, features = ["sync", "future"] }
object-store.workspace = true
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index 86b310e1ac1d..ecaeb0786ddc 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -968,6 +968,9 @@ pub enum Error {
#[snafu(display("Manual compaction is override by following operations."))]
ManualCompactionOverride {},
+
+    #[snafu(display("Incompatible WAL provider change. This is typically caused by changing the WAL provider in the database config file without completely cleaning up existing files. Global provider: {}, region provider: {}", global, region))]
+ IncompatibleWalProviderChange { global: String, region: String },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -1114,6 +1117,8 @@ impl ErrorExt for Error {
}
ManualCompactionOverride {} => StatusCode::Cancelled,
+
+ IncompatibleWalProviderChange { .. } => StatusCode::InvalidArguments,
}
}
diff --git a/src/mito2/src/region/opener.rs b/src/mito2/src/region/opener.rs
index 2992a475ab35..e984c5c5f275 100644
--- a/src/mito2/src/region/opener.rs
+++ b/src/mito2/src/region/opener.rs
@@ -14,6 +14,7 @@
//! Region opener.
+use std::any::TypeId;
use std::collections::HashMap;
use std::sync::atomic::AtomicI64;
use std::sync::Arc;
@@ -22,6 +23,8 @@ use common_telemetry::{debug, error, info, warn};
use common_wal::options::WalOptions;
use futures::future::BoxFuture;
use futures::StreamExt;
+use log_store::kafka::log_store::KafkaLogStore;
+use log_store::raft_engine::log_store::RaftEngineLogStore;
use object_store::manager::ObjectStoreManagerRef;
use object_store::util::{join_dir, normalize_dir};
use snafu::{ensure, OptionExt, ResultExt};
@@ -34,6 +37,7 @@ use store_api::storage::{ColumnId, RegionId};
use crate::access_layer::AccessLayer;
use crate::cache::CacheManagerRef;
use crate::config::MitoConfig;
+use crate::error;
use crate::error::{
EmptyRegionDirSnafu, InvalidMetadataSnafu, ObjectStoreNotFoundSnafu, RegionCorruptedSnafu,
Result, StaleLogEntrySnafu,
@@ -204,7 +208,7 @@ impl RegionOpener {
// Safety: must be set before calling this method.
let options = self.options.take().unwrap();
let object_store = self.object_store(&options.storage)?.clone();
- let provider = self.provider(&options.wal_options);
+ let provider = self.provider::<S>(&options.wal_options)?;
let metadata = Arc::new(metadata);
// Create a manifest manager for this region and writes regions to the manifest file.
let region_manifest_options = self.manifest_options(config, &options)?;
@@ -297,10 +301,28 @@ impl RegionOpener {
Ok(region)
}
- fn provider(&self, wal_options: &WalOptions) -> Provider {
+ fn provider<S: LogStore>(&self, wal_options: &WalOptions) -> Result<Provider> {
match wal_options {
- WalOptions::RaftEngine => Provider::raft_engine_provider(self.region_id.as_u64()),
- WalOptions::Kafka(options) => Provider::kafka_provider(options.topic.to_string()),
+ WalOptions::RaftEngine => {
+ ensure!(
+ TypeId::of::<RaftEngineLogStore>() == TypeId::of::<S>(),
+ error::IncompatibleWalProviderChangeSnafu {
+ global: "`kafka`",
+ region: "`raft_engine`",
+ }
+ );
+ Ok(Provider::raft_engine_provider(self.region_id.as_u64()))
+ }
+ WalOptions::Kafka(options) => {
+ ensure!(
+ TypeId::of::<KafkaLogStore>() == TypeId::of::<S>(),
+ error::IncompatibleWalProviderChangeSnafu {
+ global: "`raft_engine`",
+ region: "`kafka`",
+ }
+ );
+ Ok(Provider::kafka_provider(options.topic.to_string()))
+ }
}
}
@@ -326,7 +348,7 @@ impl RegionOpener {
let metadata = manifest.metadata.clone();
let region_id = self.region_id;
- let provider = self.provider(®ion_options.wal_options);
+ let provider = self.provider::<S>(®ion_options.wal_options)?;
let wal_entry_reader = self
.wal_entry_reader
.take()
|
chore
|
check region wal provider on startup to avoid inconsistency (#5687)
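The check above hinges on comparing the opener's generic log store type against the WAL option persisted for the region, so a provider switch is rejected at region open time. A minimal standalone sketch of the same idea, using hypothetical stand-in types rather than the real mito2/log-store APIs:

use std::any::TypeId;

// Hypothetical stand-ins for the real log store implementations.
struct RaftEngineLogStore;
struct KafkaLogStore;

#[derive(Debug, Clone, Copy)]
enum WalOptions {
    RaftEngine,
    Kafka,
}

// Rejects opening a region whose persisted WAL option does not match the
// log store type the process was started with.
fn check_provider<S: 'static>(wal_options: WalOptions) -> Result<(), String> {
    match wal_options {
        WalOptions::RaftEngine if TypeId::of::<S>() == TypeId::of::<RaftEngineLogStore>() => Ok(()),
        WalOptions::Kafka if TypeId::of::<S>() == TypeId::of::<KafkaLogStore>() => Ok(()),
        mismatch => Err(format!(
            "incompatible WAL provider change: region expects {:?}, the global provider differs",
            mismatch
        )),
    }
}

fn main() {
    // A raft-engine region opened by a raft-engine-backed process: accepted.
    assert!(check_provider::<RaftEngineLogStore>(WalOptions::RaftEngine).is_ok());
    // The same region opened by a Kafka-backed process: rejected on startup.
    assert!(check_provider::<KafkaLogStore>(WalOptions::RaftEngine).is_err());
}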
|
ce959ddd3f7d1f14988a31f565f29710f9e22d3e
|
2023-11-20 08:25:50
|
Yingwen
|
feat(mito): implements row group level page cache (#2688)
| false
|
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index a35c79a38e76..3d4d3c81e845 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -105,10 +105,11 @@ global_write_buffer_reject_size = "2GB"
sst_meta_cache_size = "128MB"
# Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 to disable the cache.
vector_cache_size = "512MB"
+# Cache size for pages of SST row groups (default 512MB). Setting it to 0 to disable the cache.
+page_cache_size = "512MB"
# Buffer size for SST writing.
sst_write_buffer_size = "8MB"
-
# Log options, see `standalone.example.toml`
# [logging]
# dir = "/tmp/greptimedb/logs"
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index 623b5f5452e0..254b89d02f29 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -152,6 +152,36 @@ auto_flush_interval = "1h"
# Global write buffer size for all regions.
global_write_buffer_size = "1GB"
+# Mito engine options
+[[region_engine]]
+[region_engine.mito]
+# Number of region workers
+num_workers = 8
+# Request channel size of each worker
+worker_channel_size = 128
+# Max batch size for a worker to handle requests
+worker_request_batch_size = 64
+# Number of meta action updated to trigger a new checkpoint for the manifest
+manifest_checkpoint_distance = 10
+# Manifest compression type
+manifest_compress_type = "Uncompressed"
+# Max number of running background jobs
+max_background_jobs = 4
+# Interval to auto flush a region if it has not flushed yet.
+auto_flush_interval = "1h"
+# Global write buffer size for all regions.
+global_write_buffer_size = "1GB"
+# Global write buffer size threshold to reject write requests (default 2G).
+global_write_buffer_reject_size = "2GB"
+# Cache size for SST metadata (default 128MB). Setting it to 0 to disable the cache.
+sst_meta_cache_size = "128MB"
+# Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 to disable the cache.
+vector_cache_size = "512MB"
+# Cache size for pages of SST row groups (default 512MB). Setting it to 0 to disable the cache.
+page_cache_size = "512MB"
+# Buffer size for SST writing.
+sst_write_buffer_size = "8MB"
+
# Log options
# [logging]
# Specify logs directory.
diff --git a/src/mito2/src/cache.rs b/src/mito2/src/cache.rs
index dbdbdc72faa8..529e5d3d4eee 100644
--- a/src/mito2/src/cache.rs
+++ b/src/mito2/src/cache.rs
@@ -24,33 +24,51 @@ use std::sync::Arc;
use datatypes::value::Value;
use datatypes::vectors::VectorRef;
use moka::sync::Cache;
+use parquet::column::page::Page;
use parquet::file::metadata::ParquetMetaData;
use store_api::storage::RegionId;
use crate::cache::cache_size::parquet_meta_size;
+use crate::metrics::{CACHE_BYTES, CACHE_HIT, CACHE_MISS};
use crate::sst::file::FileId;
+// Metrics type key for sst meta.
+const SST_META_TYPE: &str = "sst_meta";
+// Metrics type key for vector.
+const VECTOR_TYPE: &str = "vector";
+// Metrics type key for pages.
+const PAGE_TYPE: &str = "page";
+
/// Manages cached data for the engine.
pub struct CacheManager {
/// Cache for SST metadata.
sst_meta_cache: Option<SstMetaCache>,
/// Cache for vectors.
vector_cache: Option<VectorCache>,
+ /// Cache for SST pages.
+ page_cache: Option<PageCache>,
}
pub type CacheManagerRef = Arc<CacheManager>;
impl CacheManager {
/// Creates a new manager with specific cache size in bytes.
- pub fn new(sst_meta_cache_size: u64, vector_cache_size: u64) -> CacheManager {
+ pub fn new(
+ sst_meta_cache_size: u64,
+ vector_cache_size: u64,
+ page_cache_size: u64,
+ ) -> CacheManager {
let sst_meta_cache = if sst_meta_cache_size == 0 {
None
} else {
let cache = Cache::builder()
.max_capacity(sst_meta_cache_size)
- .weigher(|k: &SstMetaKey, v: &Arc<ParquetMetaData>| {
- // We ignore the size of `Arc`.
- (k.estimated_size() + parquet_meta_size(v)) as u32
+ .weigher(meta_cache_weight)
+ .eviction_listener(|k, v, _cause| {
+ let size = meta_cache_weight(&k, &v);
+ CACHE_BYTES
+ .with_label_values(&[SST_META_TYPE])
+ .sub(size.into());
})
.build();
Some(cache)
@@ -60,9 +78,25 @@ impl CacheManager {
} else {
let cache = Cache::builder()
.max_capacity(vector_cache_size)
- .weigher(|_k, v: &VectorRef| {
- // We ignore the heap size of `Value`.
- (mem::size_of::<Value>() + v.memory_size()) as u32
+ .weigher(vector_cache_weight)
+ .eviction_listener(|k, v, _cause| {
+ let size = vector_cache_weight(&k, &v);
+ CACHE_BYTES
+ .with_label_values(&[VECTOR_TYPE])
+ .sub(size.into());
+ })
+ .build();
+ Some(cache)
+ };
+ let page_cache = if page_cache_size == 0 {
+ None
+ } else {
+ let cache = Cache::builder()
+ .max_capacity(page_cache_size)
+ .weigher(page_cache_weight)
+ .eviction_listener(|k, v, _cause| {
+ let size = page_cache_weight(&k, &v);
+ CACHE_BYTES.with_label_values(&[PAGE_TYPE]).sub(size.into());
})
.build();
Some(cache)
@@ -71,6 +105,7 @@ impl CacheManager {
CacheManager {
sst_meta_cache,
vector_cache,
+ page_cache,
}
}
@@ -80,9 +115,10 @@ impl CacheManager {
region_id: RegionId,
file_id: FileId,
) -> Option<Arc<ParquetMetaData>> {
- self.sst_meta_cache
- .as_ref()
- .and_then(|sst_meta_cache| sst_meta_cache.get(&SstMetaKey(region_id, file_id)))
+ self.sst_meta_cache.as_ref().and_then(|sst_meta_cache| {
+ let value = sst_meta_cache.get(&SstMetaKey(region_id, file_id));
+ update_hit_miss(value, SST_META_TYPE)
+ })
}
/// Puts [ParquetMetaData] into the cache.
@@ -93,7 +129,11 @@ impl CacheManager {
metadata: Arc<ParquetMetaData>,
) {
if let Some(cache) = &self.sst_meta_cache {
- cache.insert(SstMetaKey(region_id, file_id), metadata);
+ let key = SstMetaKey(region_id, file_id);
+ CACHE_BYTES
+ .with_label_values(&[SST_META_TYPE])
+ .add(meta_cache_weight(&key, &metadata).into());
+ cache.insert(key, metadata);
}
}
@@ -106,17 +146,63 @@ impl CacheManager {
/// Gets a vector with repeated value for specific `key`.
pub fn get_repeated_vector(&self, key: &Value) -> Option<VectorRef> {
- self.vector_cache
- .as_ref()
- .and_then(|vector_cache| vector_cache.get(key))
+ self.vector_cache.as_ref().and_then(|vector_cache| {
+ let value = vector_cache.get(key);
+ update_hit_miss(value, VECTOR_TYPE)
+ })
}
/// Puts a vector with repeated value into the cache.
pub fn put_repeated_vector(&self, key: Value, vector: VectorRef) {
if let Some(cache) = &self.vector_cache {
+ CACHE_BYTES
+ .with_label_values(&[VECTOR_TYPE])
+ .add(vector_cache_weight(&key, &vector).into());
cache.insert(key, vector);
}
}
+
+ /// Gets pages for the row group.
+ pub fn get_pages(&self, page_key: &PageKey) -> Option<Arc<PageValue>> {
+ self.page_cache.as_ref().and_then(|page_cache| {
+ let value = page_cache.get(page_key);
+ update_hit_miss(value, PAGE_TYPE)
+ })
+ }
+
+ /// Puts pages of the row group into the cache.
+ pub fn put_pages(&self, page_key: PageKey, pages: Arc<PageValue>) {
+ if let Some(cache) = &self.page_cache {
+ CACHE_BYTES
+ .with_label_values(&[PAGE_TYPE])
+ .add(page_cache_weight(&page_key, &pages).into());
+ cache.insert(page_key, pages);
+ }
+ }
+}
+
+fn meta_cache_weight(k: &SstMetaKey, v: &Arc<ParquetMetaData>) -> u32 {
+ // We ignore the size of `Arc`.
+ (k.estimated_size() + parquet_meta_size(v)) as u32
+}
+
+fn vector_cache_weight(_k: &Value, v: &VectorRef) -> u32 {
+ // We ignore the heap size of `Value`.
+ (mem::size_of::<Value>() + v.memory_size()) as u32
+}
+
+fn page_cache_weight(k: &PageKey, v: &Arc<PageValue>) -> u32 {
+ (k.estimated_size() + v.estimated_size()) as u32
+}
+
+/// Updates cache hit/miss metrics.
+fn update_hit_miss<T>(value: Option<T>, cache_type: &str) -> Option<T> {
+ if value.is_some() {
+ CACHE_HIT.with_label_values(&[cache_type]).inc();
+ } else {
+ CACHE_MISS.with_label_values(&[cache_type]).inc();
+ }
+ value
}
/// Cache key (region id, file id) for SST meta.
@@ -126,7 +212,46 @@ struct SstMetaKey(RegionId, FileId);
impl SstMetaKey {
/// Returns memory used by the key (estimated).
fn estimated_size(&self) -> usize {
- mem::size_of::<SstMetaKey>()
+ mem::size_of::<Self>()
+ }
+}
+
+/// Cache key for pages of a SST row group.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct PageKey {
+ /// Region id of the SST file to cache.
+ pub region_id: RegionId,
+ /// Id of the SST file to cache.
+ pub file_id: FileId,
+ /// Index of the row group.
+ pub row_group_idx: usize,
+ /// Index of the column in the row group.
+ pub column_idx: usize,
+}
+
+impl PageKey {
+ /// Returns memory used by the key (estimated).
+ fn estimated_size(&self) -> usize {
+ mem::size_of::<Self>()
+ }
+}
+
+/// Cached row group pages for a column.
+pub struct PageValue {
+ /// All pages of the column in the row group.
+ pub pages: Vec<Page>,
+}
+
+impl PageValue {
+ /// Creates a new page value.
+ pub fn new(pages: Vec<Page>) -> PageValue {
+ PageValue { pages }
+ }
+
+ /// Returns memory used by the value (estimated).
+ fn estimated_size(&self) -> usize {
+ // We only consider heap size of all pages.
+ self.pages.iter().map(|page| page.buffer().len()).sum()
}
}
@@ -136,6 +261,8 @@ type SstMetaCache = Cache<SstMetaKey, Arc<ParquetMetaData>>;
///
/// e.g. `"hello" => ["hello", "hello", "hello"]`
type VectorCache = Cache<Value, VectorRef>;
+/// Maps (region, file, row group, column) to [PageValue].
+type PageCache = Cache<PageKey, Arc<PageValue>>;
#[cfg(test)]
mod tests {
@@ -146,8 +273,10 @@ mod tests {
#[test]
fn test_disable_cache() {
- let cache = CacheManager::new(0, 0);
+ let cache = CacheManager::new(0, 0, 0);
assert!(cache.sst_meta_cache.is_none());
+ assert!(cache.vector_cache.is_none());
+ assert!(cache.page_cache.is_none());
let region_id = RegionId::new(1, 1);
let file_id = FileId::random();
@@ -159,11 +288,21 @@ mod tests {
let vector: VectorRef = Arc::new(Int64Vector::from_slice([10, 10, 10, 10]));
cache.put_repeated_vector(value.clone(), vector.clone());
assert!(cache.get_repeated_vector(&value).is_none());
+
+ let key = PageKey {
+ region_id,
+ file_id,
+ row_group_idx: 0,
+ column_idx: 0,
+ };
+ let pages = Arc::new(PageValue::new(Vec::new()));
+ cache.put_pages(key.clone(), pages);
+ assert!(cache.get_pages(&key).is_none());
}
#[test]
fn test_parquet_meta_cache() {
- let cache = CacheManager::new(2000, 0);
+ let cache = CacheManager::new(2000, 0, 0);
let region_id = RegionId::new(1, 1);
let file_id = FileId::random();
assert!(cache.get_parquet_meta_data(region_id, file_id).is_none());
@@ -176,7 +315,7 @@ mod tests {
#[test]
fn test_repeated_vector_cache() {
- let cache = CacheManager::new(0, 4096);
+ let cache = CacheManager::new(0, 4096, 0);
let value = Value::Int64(10);
assert!(cache.get_repeated_vector(&value).is_none());
let vector: VectorRef = Arc::new(Int64Vector::from_slice([10, 10, 10, 10]));
@@ -184,4 +323,21 @@ mod tests {
let cached = cache.get_repeated_vector(&value).unwrap();
assert_eq!(vector, cached);
}
+
+ #[test]
+ fn test_page_cache() {
+ let cache = CacheManager::new(0, 0, 1000);
+ let region_id = RegionId::new(1, 1);
+ let file_id = FileId::random();
+ let key = PageKey {
+ region_id,
+ file_id,
+ row_group_idx: 0,
+ column_idx: 0,
+ };
+ assert!(cache.get_pages(&key).is_none());
+ let pages = Arc::new(PageValue::new(Vec::new()));
+ cache.put_pages(key.clone(), pages);
+ assert!(cache.get_pages(&key).is_some());
+ }
}
diff --git a/src/mito2/src/config.rs b/src/mito2/src/config.rs
index b4d77d145707..e6061ffa7a2e 100644
--- a/src/mito2/src/config.rs
+++ b/src/mito2/src/config.rs
@@ -63,6 +63,10 @@ pub struct MitoConfig {
pub sst_meta_cache_size: ReadableSize,
/// Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 to disable the cache.
pub vector_cache_size: ReadableSize,
+ /// Cache size for pages of SST row groups (default 512MB). Setting it to 0 to disable the cache.
+ pub page_cache_size: ReadableSize,
+
+ // Other configs:
/// Buffer size for SST writing.
pub sst_write_buffer_size: ReadableSize,
}
@@ -81,6 +85,7 @@ impl Default for MitoConfig {
global_write_buffer_reject_size: ReadableSize::gb(2),
sst_meta_cache_size: ReadableSize::mb(128),
vector_cache_size: ReadableSize::mb(512),
+ page_cache_size: ReadableSize::mb(512),
sst_write_buffer_size: ReadableSize::mb(8),
}
}
diff --git a/src/mito2/src/metrics.rs b/src/mito2/src/metrics.rs
index 7b8aa475da93..d53cbd495dd5 100644
--- a/src/mito2/src/metrics.rs
+++ b/src/mito2/src/metrics.rs
@@ -37,8 +37,6 @@ lazy_static! {
)
.unwrap();
-
-
// ------ Flush related metrics
/// Counter of scheduled flush requests.
/// Note that the flush scheduler may merge some flush requests.
@@ -122,4 +120,27 @@ lazy_static! {
pub static ref MERGE_FILTER_ROWS_TOTAL: IntCounterVec =
register_int_counter_vec!("mito_merge_filter_rows_total", "mito merge filter rows total", &[TYPE_LABEL]).unwrap();
// ------- End of query metrics.
+
+ // Cache related metrics.
+ /// Cache hit counter.
+ pub static ref CACHE_HIT: IntCounterVec = register_int_counter_vec!(
+ "mito_cache_hit",
+ "mito cache hit",
+ &[TYPE_LABEL]
+ )
+ .unwrap();
+ /// Cache miss counter.
+ pub static ref CACHE_MISS: IntCounterVec = register_int_counter_vec!(
+ "mito_cache_miss",
+ "mito cache miss",
+ &[TYPE_LABEL]
+ )
+ .unwrap();
+ /// Cache size in bytes.
+ pub static ref CACHE_BYTES: IntGaugeVec = register_int_gauge_vec!(
+ "mito_cache_bytes",
+ "mito cache bytes",
+ &[TYPE_LABEL]
+ )
+ .unwrap();
}
diff --git a/src/mito2/src/read.rs b/src/mito2/src/read.rs
index ee2305fdd916..7c3fda279026 100644
--- a/src/mito2/src/read.rs
+++ b/src/mito2/src/read.rs
@@ -686,7 +686,7 @@ mod tests {
op_types: &[OpType],
field: &[u64],
) -> Batch {
- new_batch_builder(b"test", timestamps, sequences, op_types, field)
+ new_batch_builder(b"test", timestamps, sequences, op_types, 1, field)
.build()
.unwrap()
}
diff --git a/src/mito2/src/read/projection.rs b/src/mito2/src/read/projection.rs
index ba1f462dcd4d..c5e6feefcbe1 100644
--- a/src/mito2/src/read/projection.rs
+++ b/src/mito2/src/read/projection.rs
@@ -342,7 +342,7 @@ mod tests {
assert_eq!([0, 1, 2, 3, 4], mapper.column_ids());
assert_eq!([3, 4], mapper.batch_fields());
- let cache = CacheManager::new(0, 1024);
+ let cache = CacheManager::new(0, 1024, 0);
let batch = new_batch(0, &[1, 2], &[(3, 3), (4, 4)], 3);
let record_batch = mapper.convert(&batch, Some(&cache)).unwrap();
let expect = "\
diff --git a/src/mito2/src/sst/parquet.rs b/src/mito2/src/sst/parquet.rs
index 481f98f1af12..af3f8479f39c 100644
--- a/src/mito2/src/sst/parquet.rs
+++ b/src/mito2/src/sst/parquet.rs
@@ -15,6 +15,7 @@
//! SST in parquet format.
mod format;
+mod page_reader;
pub mod reader;
pub mod row_group;
mod stats;
@@ -59,3 +60,139 @@ pub struct SstInfo {
/// Number of rows.
pub num_rows: usize,
}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use api::v1::OpType;
+ use common_time::Timestamp;
+
+ use super::*;
+ use crate::cache::{CacheManager, PageKey};
+ use crate::read::Batch;
+ use crate::sst::parquet::reader::ParquetReaderBuilder;
+ use crate::sst::parquet::writer::ParquetWriter;
+ use crate::test_util::sst_util::{
+ new_primary_key, new_source, sst_file_handle, sst_region_metadata,
+ };
+ use crate::test_util::{check_reader_result, new_batch_builder, TestEnv};
+
+ const FILE_DIR: &str = "/";
+
+ fn new_batch_by_range(tags: &[&str], start: usize, end: usize) -> Batch {
+ assert!(end > start);
+ let pk = new_primary_key(tags);
+ let timestamps: Vec<_> = (start..end).map(|v| v as i64).collect();
+ let sequences = vec![1000; end - start];
+ let op_types = vec![OpType::Put; end - start];
+ let field: Vec<_> = (start..end).map(|v| v as u64).collect();
+ new_batch_builder(&pk, ×tamps, &sequences, &op_types, 2, &field)
+ .build()
+ .unwrap()
+ }
+
+ #[tokio::test]
+ async fn test_write_read() {
+ let mut env = TestEnv::new();
+ let object_store = env.init_object_store_manager();
+ let handle = sst_file_handle(0, 1000);
+ let file_path = handle.file_path(FILE_DIR);
+ let metadata = Arc::new(sst_region_metadata());
+ let source = new_source(&[
+ new_batch_by_range(&["a", "d"], 0, 60),
+ new_batch_by_range(&["b", "f"], 0, 40),
+ new_batch_by_range(&["b", "h"], 100, 200),
+ ]);
+ // Use a small row group size for test.
+ let write_opts = WriteOptions {
+ row_group_size: 50,
+ ..Default::default()
+ };
+
+ let mut writer = ParquetWriter::new(file_path, metadata, source, object_store.clone());
+ let info = writer.write_all(&write_opts).await.unwrap().unwrap();
+ assert_eq!(200, info.num_rows);
+ assert!(info.file_size > 0);
+ assert_eq!(
+ (
+ Timestamp::new_millisecond(0),
+ Timestamp::new_millisecond(199)
+ ),
+ info.time_range
+ );
+
+ let builder = ParquetReaderBuilder::new(FILE_DIR.to_string(), handle.clone(), object_store);
+ let mut reader = builder.build().await.unwrap();
+ check_reader_result(
+ &mut reader,
+ &[
+ new_batch_by_range(&["a", "d"], 0, 50),
+ new_batch_by_range(&["a", "d"], 50, 60),
+ new_batch_by_range(&["b", "f"], 0, 40),
+ new_batch_by_range(&["b", "h"], 100, 150),
+ new_batch_by_range(&["b", "h"], 150, 200),
+ ],
+ )
+ .await;
+ }
+
+ #[tokio::test]
+ async fn test_read_with_cache() {
+ let mut env = TestEnv::new();
+ let object_store = env.init_object_store_manager();
+ let handle = sst_file_handle(0, 1000);
+ let file_path = handle.file_path(FILE_DIR);
+ let metadata = Arc::new(sst_region_metadata());
+ let source = new_source(&[
+ new_batch_by_range(&["a", "d"], 0, 60),
+ new_batch_by_range(&["b", "f"], 0, 40),
+ new_batch_by_range(&["b", "h"], 100, 200),
+ ]);
+ // Use a small row group size for test.
+ let write_opts = WriteOptions {
+ row_group_size: 50,
+ ..Default::default()
+ };
+ // Prepare data.
+ let mut writer =
+ ParquetWriter::new(file_path, metadata.clone(), source, object_store.clone());
+ writer.write_all(&write_opts).await.unwrap().unwrap();
+
+ let cache = Some(Arc::new(CacheManager::new(0, 0, 64 * 1024 * 1024)));
+ let builder = ParquetReaderBuilder::new(FILE_DIR.to_string(), handle.clone(), object_store)
+ .cache(cache.clone());
+ for _ in 0..3 {
+ let mut reader = builder.build().await.unwrap();
+ check_reader_result(
+ &mut reader,
+ &[
+ new_batch_by_range(&["a", "d"], 0, 50),
+ new_batch_by_range(&["a", "d"], 50, 60),
+ new_batch_by_range(&["b", "f"], 0, 40),
+ new_batch_by_range(&["b", "h"], 100, 150),
+ new_batch_by_range(&["b", "h"], 150, 200),
+ ],
+ )
+ .await;
+ }
+
+ // Cache 4 row groups.
+ for i in 0..4 {
+ let page_key = PageKey {
+ region_id: metadata.region_id,
+ file_id: handle.file_id(),
+ row_group_idx: i,
+ column_idx: 0,
+ };
+ assert!(cache.as_ref().unwrap().get_pages(&page_key).is_some());
+ }
+ let page_key = PageKey {
+ region_id: metadata.region_id,
+ file_id: handle.file_id(),
+ row_group_idx: 5,
+ column_idx: 0,
+ };
+ assert!(cache.as_ref().unwrap().get_pages(&page_key).is_none());
+ }
+}
diff --git a/src/mito2/src/sst/parquet/page_reader.rs b/src/mito2/src/sst/parquet/page_reader.rs
new file mode 100644
index 000000000000..1416da448b5a
--- /dev/null
+++ b/src/mito2/src/sst/parquet/page_reader.rs
@@ -0,0 +1,92 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Parquet page reader.
+
+use std::collections::VecDeque;
+
+use parquet::column::page::{Page, PageMetadata, PageReader};
+use parquet::errors::Result;
+
+/// A reader that reads from cached pages.
+pub(crate) struct CachedPageReader {
+ /// Cached pages.
+ pages: VecDeque<Page>,
+}
+
+impl CachedPageReader {
+ /// Returns a new reader from existing pages.
+ pub(crate) fn new(pages: &[Page]) -> Self {
+ Self {
+ pages: pages.iter().cloned().collect(),
+ }
+ }
+}
+
+impl PageReader for CachedPageReader {
+ fn get_next_page(&mut self) -> Result<Option<Page>> {
+ Ok(self.pages.pop_front())
+ }
+
+ fn peek_next_page(&mut self) -> Result<Option<PageMetadata>> {
+ Ok(self.pages.front().map(page_to_page_meta))
+ }
+
+ fn skip_next_page(&mut self) -> Result<()> {
+ // When the `SerializedPageReader` is in `SerializedPageReaderState::Pages` state, it never pops
+ // the dictionary page. So it always returns the dictionary page as the first page. See:
+ // https://github.com/apache/arrow-rs/blob/1d6feeacebb8d0d659d493b783ba381940973745/parquet/src/file/serialized_reader.rs#L766-L770
+ // But the `GenericColumnReader` will read the dictionary page before skipping records, so it won't skip the dictionary page.
+ // So we don't need to handle the dictionary page specifically in this method.
+ // https://github.com/apache/arrow-rs/blob/65f7be856099d389b0d0eafa9be47fad25215ee6/parquet/src/column/reader.rs#L322-L331
+ self.pages.pop_front();
+ Ok(())
+ }
+}
+
+impl Iterator for CachedPageReader {
+ type Item = Result<Page>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.get_next_page().transpose()
+ }
+}
+
+/// Get [PageMetadata] from `page`.
+///
+/// The conversion is based on [decode_page()](https://github.com/apache/arrow-rs/blob/1d6feeacebb8d0d659d493b783ba381940973745/parquet/src/file/serialized_reader.rs#L438-L481)
+/// and [PageMetadata](https://github.com/apache/arrow-rs/blob/65f7be856099d389b0d0eafa9be47fad25215ee6/parquet/src/column/page.rs#L279-L301).
+fn page_to_page_meta(page: &Page) -> PageMetadata {
+ match page {
+ Page::DataPage { num_values, .. } => PageMetadata {
+ num_rows: None,
+ num_levels: Some(*num_values as usize),
+ is_dict: false,
+ },
+ Page::DataPageV2 {
+ num_values,
+ num_rows,
+ ..
+ } => PageMetadata {
+ num_rows: Some(*num_rows as usize),
+ num_levels: Some(*num_values as usize),
+ is_dict: false,
+ },
+ Page::DictionaryPage { .. } => PageMetadata {
+ num_rows: None,
+ num_levels: None,
+ is_dict: true,
+ },
+ }
+}
diff --git a/src/mito2/src/sst/parquet/reader.rs b/src/mito2/src/sst/parquet/reader.rs
index a56e140ec033..49e38ddc4d3d 100644
--- a/src/mito2/src/sst/parquet/reader.rs
+++ b/src/mito2/src/sst/parquet/reader.rs
@@ -183,6 +183,7 @@ impl ParquetReaderBuilder {
file_reader: reader,
projection: projection_mask,
field_levels,
+ cache_manager: self.cache_manager.clone(),
};
let metrics = Metrics {
@@ -292,6 +293,8 @@ struct RowGroupReaderBuilder {
projection: ProjectionMask,
/// Field levels to read.
field_levels: FieldLevels,
+ /// Cache.
+ cache_manager: Option<CacheManagerRef>,
}
impl RowGroupReaderBuilder {
@@ -302,7 +305,13 @@ impl RowGroupReaderBuilder {
/// Builds a [ParquetRecordBatchReader] to read the row group at `row_group_idx`.
async fn build(&mut self, row_group_idx: usize) -> Result<ParquetRecordBatchReader> {
- let mut row_group = InMemoryRowGroup::create(&self.parquet_meta, row_group_idx);
+ let mut row_group = InMemoryRowGroup::create(
+ self.file_handle.region_id(),
+ self.file_handle.file_id(),
+ &self.parquet_meta,
+ row_group_idx,
+ self.cache_manager.clone(),
+ );
// Fetches data into memory.
row_group
.fetch(&mut self.file_reader, &self.projection, None)
@@ -334,6 +343,9 @@ pub struct ParquetReader {
/// Not `None` if [ParquetReader::stream] is not `None`.
read_format: ReadFormat,
/// Builder to build row group readers.
+ ///
+ /// The builder contains the file handle, so don't drop the builder while using
+ /// the [ParquetReader].
reader_builder: RowGroupReaderBuilder,
/// Reader of current row group.
current_reader: Option<ParquetRecordBatchReader>,
diff --git a/src/mito2/src/sst/parquet/row_group.rs b/src/mito2/src/sst/parquet/row_group.rs
index 7be77d692fc5..827db8999ae8 100644
--- a/src/mito2/src/sst/parquet/row_group.rs
+++ b/src/mito2/src/sst/parquet/row_group.rs
@@ -26,6 +26,11 @@ use parquet::file::metadata::{ParquetMetaData, RowGroupMetaData};
use parquet::file::reader::{ChunkReader, Length};
use parquet::file::serialized_reader::SerializedPageReader;
use parquet::format::PageLocation;
+use store_api::storage::RegionId;
+
+use crate::cache::{CacheManagerRef, PageKey, PageValue};
+use crate::sst::file::FileId;
+use crate::sst::parquet::page_reader::CachedPageReader;
/// An in-memory collection of column chunks
pub struct InMemoryRowGroup<'a> {
@@ -33,6 +38,14 @@ pub struct InMemoryRowGroup<'a> {
page_locations: Option<&'a [Vec<PageLocation>]>,
column_chunks: Vec<Option<Arc<ColumnChunkData>>>,
row_count: usize,
+ region_id: RegionId,
+ file_id: FileId,
+ row_group_idx: usize,
+ cache_manager: Option<CacheManagerRef>,
+ /// Cached pages for each column.
+ ///
+ /// `column_cached_pages.len()` equals to `column_chunks.len()`.
+ column_cached_pages: Vec<Option<Arc<PageValue>>>,
}
impl<'a> InMemoryRowGroup<'a> {
@@ -40,8 +53,17 @@ impl<'a> InMemoryRowGroup<'a> {
///
/// # Panics
/// Panics if the `row_group_idx` is invalid.
- pub fn create(parquet_meta: &'a ParquetMetaData, row_group_idx: usize) -> Self {
+ pub fn create(
+ region_id: RegionId,
+ file_id: FileId,
+ parquet_meta: &'a ParquetMetaData,
+ row_group_idx: usize,
+ cache_manager: Option<CacheManagerRef>,
+ ) -> Self {
let metadata = parquet_meta.row_group(row_group_idx);
+ // `page_locations` is always `None` if we don't set
+ // [with_page_index()](https://docs.rs/parquet/latest/parquet/arrow/arrow_reader/struct.ArrowReaderOptions.html#method.with_page_index)
+ // to `true`.
let page_locations = parquet_meta
.offset_index()
.map(|x| x[row_group_idx].as_slice());
@@ -51,6 +73,11 @@ impl<'a> InMemoryRowGroup<'a> {
row_count: metadata.num_rows() as usize,
column_chunks: vec![None; metadata.columns().len()],
page_locations,
+ region_id,
+ file_id,
+ row_group_idx,
+ cache_manager,
+ column_cached_pages: vec![None; metadata.columns().len()],
}
}
@@ -114,22 +141,39 @@ impl<'a> InMemoryRowGroup<'a> {
}
}
} else {
- let fetch_ranges = self
+ // Currently we only use the cache for dense chunk data.
+ self.fetch_pages_from_cache(projection);
+
+ let fetch_ranges: Vec<_> = self
.column_chunks
.iter()
+ .zip(&self.column_cached_pages)
.enumerate()
- .filter(|&(idx, chunk)| chunk.is_none() && projection.leaf_included(idx))
- .map(|(idx, _chunk)| {
+ // Don't need to fetch column data if we already cache the column's pages.
+ .filter(|&(idx, (chunk, cached_pages))| {
+ chunk.is_none() && projection.leaf_included(idx) && cached_pages.is_none()
+ })
+ .map(|(idx, (_chunk, _cached_pages))| {
let column = self.metadata.column(idx);
let (start, length) = column.byte_range();
start as usize..(start + length) as usize
})
.collect();
+ if fetch_ranges.is_empty() {
+ // Nothing to fetch.
+ return Ok(());
+ }
+
let mut chunk_data = input.get_byte_ranges(fetch_ranges).await?.into_iter();
- for (idx, chunk) in self.column_chunks.iter_mut().enumerate() {
- if chunk.is_some() || !projection.leaf_included(idx) {
+ for (idx, (chunk, cached_pages)) in self
+ .column_chunks
+ .iter_mut()
+ .zip(&self.column_cached_pages)
+ .enumerate()
+ {
+ if chunk.is_some() || !projection.leaf_included(idx) || cached_pages.is_some() {
continue;
}
@@ -144,32 +188,82 @@ impl<'a> InMemoryRowGroup<'a> {
Ok(())
}
-}
-impl<'a> RowGroups for InMemoryRowGroup<'a> {
- fn num_rows(&self) -> usize {
- self.row_count
+ /// Fetches pages for columns if cache is enabled.
+ fn fetch_pages_from_cache(&mut self, projection: &ProjectionMask) {
+ self.column_chunks
+ .iter()
+ .enumerate()
+ .filter(|&(idx, chunk)| chunk.is_none() && projection.leaf_included(idx))
+ .for_each(|(idx, _chunk)| {
+ if let Some(cache) = &self.cache_manager {
+ let page_key = PageKey {
+ region_id: self.region_id,
+ file_id: self.file_id,
+ row_group_idx: self.row_group_idx,
+ column_idx: idx,
+ };
+ self.column_cached_pages[idx] = cache.get_pages(&page_key);
+ }
+ });
}
- fn column_chunks(&self, i: usize) -> Result<Box<dyn PageIterator>> {
- match &self.column_chunks[i] {
- None => Err(ParquetError::General(format!(
- "Invalid column index {i}, column was not fetched"
- ))),
+ /// Creates a page reader to read column at `i`.
+ fn column_page_reader(&self, i: usize) -> Result<Box<dyn PageReader>> {
+ if let Some(cached_pages) = &self.column_cached_pages[i] {
+ // Already in cache.
+ return Ok(Box::new(CachedPageReader::new(&cached_pages.pages)));
+ }
+
+ // Cache miss.
+ let page_reader = match &self.column_chunks[i] {
+ None => {
+ return Err(ParquetError::General(format!(
+ "Invalid column index {i}, column was not fetched"
+ )))
+ }
Some(data) => {
let page_locations = self.page_locations.map(|index| index[i].clone());
- let page_reader: Box<dyn PageReader> = Box::new(SerializedPageReader::new(
+ SerializedPageReader::new(
data.clone(),
self.metadata.column(i),
self.row_count,
page_locations,
- )?);
-
- Ok(Box::new(ColumnChunkIterator {
- reader: Some(Ok(page_reader)),
- }))
+ )?
}
- }
+ };
+
+ let Some(cache) = &self.cache_manager else {
+ // Cache is disabled.
+ return Ok(Box::new(page_reader));
+ };
+
+ // We collect all pages and put them into the cache.
+ let pages = page_reader.collect::<Result<Vec<_>>>()?;
+ let page_value = Arc::new(PageValue::new(pages));
+ let page_key = PageKey {
+ region_id: self.region_id,
+ file_id: self.file_id,
+ row_group_idx: self.row_group_idx,
+ column_idx: i,
+ };
+ cache.put_pages(page_key, page_value.clone());
+
+ Ok(Box::new(CachedPageReader::new(&page_value.pages)))
+ }
+}
+
+impl<'a> RowGroups for InMemoryRowGroup<'a> {
+ fn num_rows(&self) -> usize {
+ self.row_count
+ }
+
+ fn column_chunks(&self, i: usize) -> Result<Box<dyn PageIterator>> {
+ let page_reader = self.column_page_reader(i)?;
+
+ Ok(Box::new(ColumnChunkIterator {
+ reader: Some(Ok(page_reader)),
+ }))
}
}
diff --git a/src/mito2/src/sst/parquet/writer.rs b/src/mito2/src/sst/parquet/writer.rs
index aa68a1e1c6cd..17d50dc3a3f1 100644
--- a/src/mito2/src/sst/parquet/writer.rs
+++ b/src/mito2/src/sst/parquet/writer.rs
@@ -163,5 +163,3 @@ impl SourceStats {
}
}
}
-
-// TODO(yingwen): Port tests.
diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs
index 69bd22d26e1b..e602643ff5c5 100644
--- a/src/mito2/src/test_util.rs
+++ b/src/mito2/src/test_util.rs
@@ -17,6 +17,7 @@
pub mod memtable_util;
pub mod meta_util;
pub mod scheduler_util;
+pub mod sst_util;
pub mod version_util;
use std::collections::HashMap;
@@ -195,6 +196,12 @@ impl TestEnv {
)
}
+ /// Only initializes the object store manager, returns the default object store.
+ pub fn init_object_store_manager(&mut self) -> ObjectStore {
+ self.object_store_manager = Some(Arc::new(self.create_object_store_manager()));
+ self.get_object_store().unwrap()
+ }
+
/// Creates a new [WorkerGroup] with specific config under this env.
pub(crate) async fn create_worker_group(&self, config: MitoConfig) -> WorkerGroup {
let (log_store, object_store_manager) = self.create_log_and_object_store_manager().await;
@@ -207,14 +214,19 @@ impl TestEnv {
) -> (RaftEngineLogStore, ObjectStoreManager) {
let data_home = self.data_home.path();
let wal_path = data_home.join("wal");
- let data_path = data_home.join("data").as_path().display().to_string();
-
let log_store = log_store_util::create_tmp_local_file_log_store(&wal_path).await;
+
+ let object_store_manager = self.create_object_store_manager();
+ (log_store, object_store_manager)
+ }
+
+ fn create_object_store_manager(&self) -> ObjectStoreManager {
+ let data_home = self.data_home.path();
+ let data_path = data_home.join("data").as_path().display().to_string();
let mut builder = Fs::default();
builder.root(&data_path);
let object_store = ObjectStore::new(builder).unwrap().finish();
- let object_store_manager = ObjectStoreManager::new("default", object_store);
- (log_store, object_store_manager)
+ ObjectStoreManager::new("default", object_store)
}
/// If `initial_metadata` is `Some`, creates a new manifest. If `initial_metadata`
@@ -414,6 +426,7 @@ pub fn new_batch_builder(
timestamps: &[i64],
sequences: &[u64],
op_types: &[OpType],
+ field_column_id: ColumnId,
field: &[u64],
) -> BatchBuilder {
let mut builder = BatchBuilder::new(primary_key.to_vec());
@@ -431,13 +444,14 @@ pub fn new_batch_builder(
)))
.unwrap()
.push_field_array(
- 1,
+ field_column_id,
Arc::new(UInt64Array::from_iter_values(field.iter().copied())),
)
.unwrap();
builder
}
+/// Returns a new [Batch] whose field has column id 1.
pub fn new_batch(
primary_key: &[u8],
timestamps: &[i64],
@@ -445,7 +459,7 @@ pub fn new_batch(
op_types: &[OpType],
field: &[u64],
) -> Batch {
- new_batch_builder(primary_key, timestamps, sequences, op_types, field)
+ new_batch_builder(primary_key, timestamps, sequences, op_types, 1, field)
.build()
.unwrap()
}
diff --git a/src/mito2/src/test_util/sst_util.rs b/src/mito2/src/test_util/sst_util.rs
new file mode 100644
index 000000000000..3638d119faa1
--- /dev/null
+++ b/src/mito2/src/test_util/sst_util.rs
@@ -0,0 +1,112 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Utilities for testing SSTs.
+
+use api::v1::SemanticType;
+use common_time::Timestamp;
+use datatypes::prelude::ConcreteDataType;
+use datatypes::schema::ColumnSchema;
+use datatypes::value::ValueRef;
+use store_api::metadata::{ColumnMetadata, RegionMetadata, RegionMetadataBuilder};
+use store_api::storage::RegionId;
+
+use crate::read::{Batch, Source};
+use crate::row_converter::{McmpRowCodec, RowCodec, SortField};
+use crate::sst::file::{FileHandle, FileId, FileMeta};
+use crate::test_util::{new_noop_file_purger, VecBatchReader};
+
+/// Test region id.
+const REGION_ID: RegionId = RegionId::new(0, 0);
+
+/// Creates a new region metadata for testing SSTs.
+///
+/// Schema: tag_0, tag_1, field_0, ts
+pub fn sst_region_metadata() -> RegionMetadata {
+ let mut builder = RegionMetadataBuilder::new(REGION_ID);
+ builder
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "tag_0".to_string(),
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ semantic_type: SemanticType::Tag,
+ column_id: 0,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "tag_1".to_string(),
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ semantic_type: SemanticType::Tag,
+ column_id: 1,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "field_0".to_string(),
+ ConcreteDataType::uint64_datatype(),
+ true,
+ ),
+ semantic_type: SemanticType::Field,
+ column_id: 2,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "ts".to_string(),
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ ),
+ semantic_type: SemanticType::Timestamp,
+ column_id: 3,
+ })
+ .primary_key(vec![0, 1]);
+ builder.build().unwrap()
+}
+
+/// Encodes a primary key for specific tags.
+pub fn new_primary_key(tags: &[&str]) -> Vec<u8> {
+ let fields = (0..tags.len())
+ .map(|_| SortField::new(ConcreteDataType::string_datatype()))
+ .collect();
+ let converter = McmpRowCodec::new(fields);
+ converter
+ .encode(tags.iter().map(|tag| ValueRef::String(tag)))
+ .unwrap()
+}
+
+/// Creates a [Source] from `batches`.
+pub fn new_source(batches: &[Batch]) -> Source {
+ let reader = VecBatchReader::new(batches);
+ Source::Reader(Box::new(reader))
+}
+
+/// Creates a new [FileHandle] for a SST.
+pub fn sst_file_handle(start_ms: i64, end_ms: i64) -> FileHandle {
+ let file_purger = new_noop_file_purger();
+ FileHandle::new(
+ FileMeta {
+ region_id: REGION_ID,
+ file_id: FileId::random(),
+ time_range: (
+ Timestamp::new_millisecond(start_ms),
+ Timestamp::new_millisecond(end_ms),
+ ),
+ level: 0,
+ file_size: 0,
+ },
+ file_purger,
+ )
+}
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index ebff32be789d..64f7472c3842 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -122,6 +122,7 @@ impl WorkerGroup {
let cache_manager = Arc::new(CacheManager::new(
config.sst_meta_cache_size.as_bytes(),
config.vector_cache_size.as_bytes(),
+ config.page_cache_size.as_bytes(),
));
let workers = (0..config.num_workers)
@@ -219,6 +220,7 @@ impl WorkerGroup {
let cache_manager = Arc::new(CacheManager::new(
config.sst_meta_cache_size.as_bytes(),
config.vector_cache_size.as_bytes(),
+ config.page_cache_size.as_bytes(),
));
let workers = (0..config.num_workers)
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index c8f7ed871756..e5b398e2a673 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -743,6 +743,7 @@ global_write_buffer_size = "1GiB"
global_write_buffer_reject_size = "2GiB"
sst_meta_cache_size = "128MiB"
vector_cache_size = "512MiB"
+page_cache_size = "512MiB"
sst_write_buffer_size = "8MiB"
[[datanode.region_engine]]
|
feat
|
implements row group level page cache (#2688)
|
92a8e863ded618fe1be93f799360015b4f8f28b6
|
2024-03-27 17:09:23
|
JeremyHi
|
chore: do not reply for broadcast msg (#3595)
| false
|
diff --git a/src/common/meta/src/instruction.rs b/src/common/meta/src/instruction.rs
index 4b0055551615..caefe2eb221e 100644
--- a/src/common/meta/src/instruction.rs
+++ b/src/common/meta/src/instruction.rs
@@ -203,7 +203,6 @@ pub enum InstructionReply {
OpenRegion(SimpleReply),
CloseRegion(SimpleReply),
UpgradeRegion(UpgradeRegionReply),
- InvalidateTableCache(SimpleReply),
DowngradeRegion(DowngradeRegionReply),
}
@@ -213,9 +212,6 @@ impl Display for InstructionReply {
Self::OpenRegion(reply) => write!(f, "InstructionReply::OpenRegion({})", reply),
Self::CloseRegion(reply) => write!(f, "InstructionReply::CloseRegion({})", reply),
Self::UpgradeRegion(reply) => write!(f, "InstructionReply::UpgradeRegion({})", reply),
- Self::InvalidateTableCache(reply) => {
- write!(f, "InstructionReply::Invalidate({})", reply)
- }
Self::DowngradeRegion(reply) => {
write!(f, "InstructionReply::DowngradeRegion({})", reply)
}
diff --git a/src/frontend/src/heartbeat/handler/invalidate_table_cache.rs b/src/frontend/src/heartbeat/handler/invalidate_table_cache.rs
index 48abd8fadda4..3cda489035b1 100644
--- a/src/frontend/src/heartbeat/handler/invalidate_table_cache.rs
+++ b/src/frontend/src/heartbeat/handler/invalidate_table_cache.rs
@@ -18,8 +18,8 @@ use common_meta::error::Result as MetaResult;
use common_meta::heartbeat::handler::{
HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};
-use common_meta::instruction::{Instruction, InstructionReply, SimpleReply};
-use common_telemetry::error;
+use common_meta::instruction::Instruction;
+use common_telemetry::debug;
#[derive(Clone)]
pub struct InvalidateTableCacheHandler {
@@ -36,35 +36,20 @@ impl HeartbeatResponseHandler for InvalidateTableCacheHandler {
}
async fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> MetaResult<HandleControl> {
- let mailbox = ctx.mailbox.clone();
- let cache_invalidator = self.cache_invalidator.clone();
-
- let (meta, invalidator) = match ctx.incoming_message.take() {
- Some((meta, Instruction::InvalidateCaches(caches))) => (meta, async move {
- cache_invalidator
- .invalidate(&Context::default(), caches)
- .await
- }),
- _ => unreachable!("InvalidateTableCacheHandler: should be guarded by 'is_acceptable'"),
+ let Some((_, Instruction::InvalidateCaches(caches))) = ctx.incoming_message.take() else {
+ unreachable!("InvalidateTableCacheHandler: should be guarded by 'is_acceptable'")
};
- let _handle = common_runtime::spawn_bg(async move {
- // Local cache invalidation always succeeds.
- let _ = invalidator.await;
+ debug!(
+ "InvalidateTableCacheHandler: invalidating caches: {:?}",
+ caches
+ );
- if let Err(e) = mailbox
- .send((
- meta,
- InstructionReply::InvalidateTableCache(SimpleReply {
- result: true,
- error: None,
- }),
- ))
- .await
- {
- error!(e; "Failed to send reply to mailbox");
- }
- });
+ // Invalidating the local cache always succeeds.
+ let _ = self
+ .cache_invalidator
+ .invalidate(&Context::default(), caches)
+ .await?;
Ok(HandleControl::Done)
}
diff --git a/src/frontend/src/heartbeat/handler/tests.rs b/src/frontend/src/heartbeat/handler/tests.rs
index f23558cc7e16..1b6885ddb777 100644
--- a/src/frontend/src/heartbeat/handler/tests.rs
+++ b/src/frontend/src/heartbeat/handler/tests.rs
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::assert_matches::assert_matches;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
@@ -22,7 +21,7 @@ use common_meta::heartbeat::handler::{
HandlerGroupExecutor, HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutor,
};
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MessageMeta};
-use common_meta::instruction::{CacheIdent, Instruction, InstructionReply, SimpleReply};
+use common_meta::instruction::{CacheIdent, Instruction};
use common_meta::key::table_info::TableInfoKey;
use common_meta::key::TableMetaKey;
use partition::manager::TableRouteCacheInvalidator;
@@ -67,7 +66,7 @@ async fn test_invalidate_table_cache_handler() {
InvalidateTableCacheHandler::new(backend.clone()),
)]));
- let (tx, mut rx) = mpsc::channel(8);
+ let (tx, _) = mpsc::channel(8);
let mailbox = Arc::new(HeartbeatMailbox::new(tx));
// removes a valid key
@@ -78,11 +77,6 @@ async fn test_invalidate_table_cache_handler() {
)
.await;
- let (_, reply) = rx.recv().await.unwrap();
- assert_matches!(
- reply,
- InstructionReply::InvalidateTableCache(SimpleReply { result: true, .. })
- );
assert!(!backend
.inner
.lock()
@@ -96,12 +90,6 @@ async fn test_invalidate_table_cache_handler() {
Instruction::InvalidateCaches(vec![CacheIdent::TableId(0)]),
)
.await;
-
- let (_, reply) = rx.recv().await.unwrap();
- assert_matches!(
- reply,
- InstructionReply::InvalidateTableCache(SimpleReply { result: true, .. })
- );
}
pub fn test_message_meta(id: u64, subject: &str, to: &str, from: &str) -> MessageMeta {
|
chore
|
do not reply for broadcast msg (#3595)
|
f4190cfca6f2c8b8cfb4a0b94b95df357c08b2b5
|
2023-04-17 17:40:36
|
Ruihang Xia
|
fix: table scan without projection (#1404)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index c42fc74b9d5d..3d8630f20b3b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2253,7 +2253,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "22.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=74a778ca6016a853a3c3add3fa8c6f12f4fe4561#74a778ca6016a853a3c3add3fa8c6f12f4fe4561"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793#b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -2302,7 +2302,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "22.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=74a778ca6016a853a3c3add3fa8c6f12f4fe4561#74a778ca6016a853a3c3add3fa8c6f12f4fe4561"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793#b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793"
dependencies = [
"arrow",
"arrow-array",
@@ -2316,7 +2316,7 @@ dependencies = [
[[package]]
name = "datafusion-execution"
version = "22.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=74a778ca6016a853a3c3add3fa8c6f12f4fe4561#74a778ca6016a853a3c3add3fa8c6f12f4fe4561"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793#b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793"
dependencies = [
"dashmap",
"datafusion-common",
@@ -2333,7 +2333,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "22.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=74a778ca6016a853a3c3add3fa8c6f12f4fe4561#74a778ca6016a853a3c3add3fa8c6f12f4fe4561"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793#b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -2344,7 +2344,7 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
version = "22.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=74a778ca6016a853a3c3add3fa8c6f12f4fe4561#74a778ca6016a853a3c3add3fa8c6f12f4fe4561"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793#b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793"
dependencies = [
"arrow",
"async-trait",
@@ -2361,7 +2361,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "22.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=74a778ca6016a853a3c3add3fa8c6f12f4fe4561#74a778ca6016a853a3c3add3fa8c6f12f4fe4561"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793#b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -2392,7 +2392,7 @@ dependencies = [
[[package]]
name = "datafusion-row"
version = "22.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=74a778ca6016a853a3c3add3fa8c6f12f4fe4561#74a778ca6016a853a3c3add3fa8c6f12f4fe4561"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793#b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793"
dependencies = [
"arrow",
"datafusion-common",
@@ -2403,7 +2403,7 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "22.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=74a778ca6016a853a3c3add3fa8c6f12f4fe4561#74a778ca6016a853a3c3add3fa8c6f12f4fe4561"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793#b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793"
dependencies = [
"arrow",
"arrow-schema",
@@ -2416,7 +2416,7 @@ dependencies = [
[[package]]
name = "datafusion-substrait"
version = "22.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=74a778ca6016a853a3c3add3fa8c6f12f4fe4561#74a778ca6016a853a3c3add3fa8c6f12f4fe4561"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793#b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793"
dependencies = [
"async-recursion",
"chrono",
diff --git a/Cargo.toml b/Cargo.toml
index d2391c76231c..08d09b1b4a9f 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -59,13 +59,14 @@ arrow-schema = { version = "37.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
-datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
-datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
-datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
-datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
+# TODO(ruihang): use arrow-datafusion when it contains https://github.com/apache/arrow-datafusion/pull/6032
+datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
+datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
+datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
+datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
+datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
+datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
+datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b14f7a9ffe91257fc3d2a5d654f2a1a14a8fc793" }
futures = "0.3"
futures-util = "0.3"
parquet = "37.0"
diff --git a/tests/cases/standalone/copy/copy_from_fs.result b/tests/cases/standalone/common/copy/copy_from_fs.result
similarity index 100%
rename from tests/cases/standalone/copy/copy_from_fs.result
rename to tests/cases/standalone/common/copy/copy_from_fs.result
diff --git a/tests/cases/standalone/copy/copy_from_fs.sql b/tests/cases/standalone/common/copy/copy_from_fs.sql
similarity index 100%
rename from tests/cases/standalone/copy/copy_from_fs.sql
rename to tests/cases/standalone/common/copy/copy_from_fs.sql
diff --git a/tests/cases/standalone/copy/copy_to_fs.result b/tests/cases/standalone/common/copy/copy_to_fs.result
similarity index 100%
rename from tests/cases/standalone/copy/copy_to_fs.result
rename to tests/cases/standalone/common/copy/copy_to_fs.result
diff --git a/tests/cases/standalone/copy/copy_to_fs.sql b/tests/cases/standalone/common/copy/copy_to_fs.sql
similarity index 100%
rename from tests/cases/standalone/copy/copy_to_fs.sql
rename to tests/cases/standalone/common/copy/copy_to_fs.sql
|
fix
|
table scan without projection (#1404)
|
df751c38b4014641da0cf59a9e13ab5d4d44355b
|
2023-02-27 08:30:15
|
LFC
|
feat: a simple REPL for debugging purpose (#1048)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index bd28e140c19e..aa2a385182ff 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1350,14 +1350,21 @@ dependencies = [
"anymap",
"build-data",
"clap 3.2.23",
+ "client",
"common-base",
"common-error",
+ "common-query",
+ "common-recordbatch",
"common-telemetry",
"datanode",
+ "either",
"frontend",
"futures",
"meta-client",
"meta-srv",
+ "nu-ansi-term",
+ "rexpect",
+ "rustyline",
"serde",
"servers",
"snafu",
@@ -1387,6 +1394,12 @@ dependencies = [
"unicode-width",
]
+[[package]]
+name = "comma"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
+
[[package]]
name = "common-base"
version = "0.1.0"
@@ -5904,6 +5917,19 @@ dependencies = [
"syn-ext",
]
+[[package]]
+name = "rexpect"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01ff60778f96fb5a48adbe421d21bf6578ed58c0872d712e7e08593c195adff8"
+dependencies = [
+ "comma",
+ "nix 0.25.1",
+ "regex",
+ "tempfile",
+ "thiserror",
+]
+
[[package]]
name = "ring"
version = "0.16.20"
diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml
index b55960817fba..2a0c891d45c9 100644
--- a/src/cmd/Cargo.toml
+++ b/src/cmd/Cargo.toml
@@ -12,16 +12,22 @@ path = "src/bin/greptime.rs"
[dependencies]
anymap = "1.0.0-beta.2"
clap = { version = "3.1", features = ["derive"] }
+client = { path = "../client" }
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
+common-query = { path = "../common/query" }
+common-recordbatch = { path = "../common/recordbatch" }
common-telemetry = { path = "../common/telemetry", features = [
"deadlock_detection",
] }
datanode = { path = "../datanode" }
+either = "1.8"
frontend = { path = "../frontend" }
futures.workspace = true
meta-client = { path = "../meta-client" }
meta-srv = { path = "../meta-srv" }
+nu-ansi-term = "0.46"
+rustyline = "10.1"
serde.workspace = true
servers = { path = "../servers" }
snafu.workspace = true
@@ -29,6 +35,7 @@ tokio.workspace = true
toml = "0.5"
[dev-dependencies]
+rexpect = "0.5"
serde.workspace = true
tempdir = "0.3"
diff --git a/src/cmd/src/bin/greptime.rs b/src/cmd/src/bin/greptime.rs
index 6ee20faf4440..b1dec90ce89d 100644
--- a/src/cmd/src/bin/greptime.rs
+++ b/src/cmd/src/bin/greptime.rs
@@ -16,7 +16,7 @@ use std::fmt;
use clap::Parser;
use cmd::error::Result;
-use cmd::{datanode, frontend, metasrv, standalone};
+use cmd::{cli, datanode, frontend, metasrv, standalone};
use common_telemetry::logging::{error, info};
#[derive(Parser)]
@@ -46,6 +46,8 @@ enum SubCommand {
Metasrv(metasrv::Command),
#[clap(name = "standalone")]
Standalone(standalone::Command),
+ #[clap(name = "cli")]
+ Cli(cli::Command),
}
impl SubCommand {
@@ -55,6 +57,7 @@ impl SubCommand {
SubCommand::Frontend(cmd) => cmd.run().await,
SubCommand::Metasrv(cmd) => cmd.run().await,
SubCommand::Standalone(cmd) => cmd.run().await,
+ SubCommand::Cli(cmd) => cmd.run().await,
}
}
}
@@ -66,6 +69,7 @@ impl fmt::Display for SubCommand {
SubCommand::Frontend(..) => write!(f, "greptime-frontend"),
SubCommand::Metasrv(..) => write!(f, "greptime-metasrv"),
SubCommand::Standalone(..) => write!(f, "greptime-standalone"),
+ SubCommand::Cli(_) => write!(f, "greptime-cli"),
}
}
}
diff --git a/src/cmd/src/cli.rs b/src/cmd/src/cli.rs
new file mode 100644
index 000000000000..6de7a91a3907
--- /dev/null
+++ b/src/cmd/src/cli.rs
@@ -0,0 +1,62 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod cmd;
+mod helper;
+mod repl;
+
+use clap::Parser;
+use repl::Repl;
+
+use crate::error::Result;
+
+#[derive(Parser)]
+pub struct Command {
+ #[clap(subcommand)]
+ cmd: SubCommand,
+}
+
+impl Command {
+ pub async fn run(self) -> Result<()> {
+ self.cmd.run().await
+ }
+}
+
+#[derive(Parser)]
+enum SubCommand {
+ Attach(AttachCommand),
+}
+
+impl SubCommand {
+ async fn run(self) -> Result<()> {
+ match self {
+ SubCommand::Attach(cmd) => cmd.run().await,
+ }
+ }
+}
+
+#[derive(Debug, Parser)]
+pub(crate) struct AttachCommand {
+ #[clap(long)]
+ pub(crate) grpc_addr: String,
+ #[clap(long, action)]
+ pub(crate) disable_helper: bool,
+}
+
+impl AttachCommand {
+ async fn run(self) -> Result<()> {
+ let mut repl = Repl::try_new(&self)?;
+ repl.run().await
+ }
+}
diff --git a/src/cmd/src/cli/cmd.rs b/src/cmd/src/cli/cmd.rs
new file mode 100644
index 000000000000..557a02b38500
--- /dev/null
+++ b/src/cmd/src/cli/cmd.rs
@@ -0,0 +1,154 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::error::{Error, InvalidReplCommandSnafu, Result};
+
+/// Represents the parsed command from the user (which may be over many lines)
+#[derive(Debug, PartialEq)]
+pub(crate) enum ReplCommand {
+ Help,
+ UseDatabase { db_name: String },
+ Sql { sql: String },
+ Exit,
+}
+
+impl TryFrom<&str> for ReplCommand {
+ type Error = Error;
+
+ fn try_from(input: &str) -> Result<Self> {
+ let input = input.trim();
+ if input.is_empty() {
+ return InvalidReplCommandSnafu {
+ reason: "No command specified".to_string(),
+ }
+ .fail();
+ }
+
+ // If line ends with ';', it must be treated as a complete input.
+ // However, the opposite is not true.
+ let input_is_completed = input.ends_with(';');
+
+ let input = input.strip_suffix(';').map(|x| x.trim()).unwrap_or(input);
+ let lowercase = input.to_lowercase();
+ match lowercase.as_str() {
+ "help" => Ok(Self::Help),
+ "exit" | "quit" => Ok(Self::Exit),
+ _ => match input.split_once(' ') {
+ Some((maybe_use, database)) if maybe_use.to_lowercase() == "use" => {
+ Ok(Self::UseDatabase {
+ db_name: database.trim().to_string(),
+ })
+ }
+ // Any valid SQL must contain at least one whitespace.
+ Some(_) if input_is_completed => Ok(Self::Sql {
+ sql: input.to_string(),
+ }),
+ _ => InvalidReplCommandSnafu {
+ reason: format!("unknown command '{input}', maybe the input is not complete"),
+ }
+ .fail(),
+ },
+ }
+ }
+}
+
+impl ReplCommand {
+ pub fn help() -> &'static str {
+ r#"
+Available commands (case insensitive):
+- 'help': print this help
+- 'exit' or 'quit': exit the REPL
+- 'use <your database name>': switch to another database/schema context
+- Other typed-in text will be treated as SQL.
+ You can enter a new line while typing, just remember to end it with ';'.
+"#
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::error::Error::InvalidReplCommand;
+
+ #[test]
+ fn test_from_str() {
+ fn test_ok(s: &str, expected: ReplCommand) {
+ let actual: ReplCommand = s.try_into().unwrap();
+ assert_eq!(expected, actual, "'{}'", s);
+ }
+
+ fn test_err(s: &str) {
+ let result: Result<ReplCommand> = s.try_into();
+ assert!(matches!(result, Err(InvalidReplCommand { .. })))
+ }
+
+ test_err("");
+ test_err(" ");
+ test_err("\t");
+
+ test_ok("help", ReplCommand::Help);
+ test_ok("help", ReplCommand::Help);
+ test_ok(" help", ReplCommand::Help);
+ test_ok(" help ", ReplCommand::Help);
+ test_ok(" HELP ", ReplCommand::Help);
+ test_ok(" Help; ", ReplCommand::Help);
+ test_ok(" help ; ", ReplCommand::Help);
+
+ test_ok("exit", ReplCommand::Exit);
+ test_ok("exit;", ReplCommand::Exit);
+ test_ok("exit ;", ReplCommand::Exit);
+ test_ok("EXIT", ReplCommand::Exit);
+
+ test_ok("quit", ReplCommand::Exit);
+ test_ok("quit;", ReplCommand::Exit);
+ test_ok("quit ;", ReplCommand::Exit);
+ test_ok("QUIT", ReplCommand::Exit);
+
+ test_ok(
+ "use Foo",
+ ReplCommand::UseDatabase {
+ db_name: "Foo".to_string(),
+ },
+ );
+ test_ok(
+ " use Foo ; ",
+ ReplCommand::UseDatabase {
+ db_name: "Foo".to_string(),
+ },
+ );
+ // ensure that database name is case sensitive
+ test_ok(
+ " use FOO ; ",
+ ReplCommand::UseDatabase {
+ db_name: "FOO".to_string(),
+ },
+ );
+
+ // ensure that we aren't messing with capitalization
+ test_ok(
+ "SELECT * from foo;",
+ ReplCommand::Sql {
+ sql: "SELECT * from foo".to_string(),
+ },
+ );
+ // An input line (that doesn't match any of the cases above) must end with ';' to be treated as valid SQL.
+ test_err("insert blah");
+ test_ok(
+ "insert blah;",
+ ReplCommand::Sql {
+ sql: "insert blah".to_string(),
+ },
+ );
+ }
+}
diff --git a/src/cmd/src/cli/helper.rs b/src/cmd/src/cli/helper.rs
new file mode 100644
index 000000000000..08b12595149e
--- /dev/null
+++ b/src/cmd/src/cli/helper.rs
@@ -0,0 +1,112 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::borrow::Cow;
+
+use rustyline::completion::Completer;
+use rustyline::highlight::{Highlighter, MatchingBracketHighlighter};
+use rustyline::hint::{Hinter, HistoryHinter};
+use rustyline::validate::{ValidationContext, ValidationResult, Validator};
+
+use crate::cli::cmd::ReplCommand;
+
+pub(crate) struct RustylineHelper {
+ hinter: HistoryHinter,
+ highlighter: MatchingBracketHighlighter,
+}
+
+impl Default for RustylineHelper {
+ fn default() -> Self {
+ Self {
+ hinter: HistoryHinter {},
+ highlighter: MatchingBracketHighlighter::default(),
+ }
+ }
+}
+
+impl rustyline::Helper for RustylineHelper {}
+
+impl Validator for RustylineHelper {
+ fn validate(&self, ctx: &mut ValidationContext<'_>) -> rustyline::Result<ValidationResult> {
+ let input = ctx.input();
+ match ReplCommand::try_from(input) {
+ Ok(_) => Ok(ValidationResult::Valid(None)),
+ Err(e) => {
+ if input.trim_end().ends_with(';') {
+ // If line ends with ';', it HAS to be a valid command.
+ Ok(ValidationResult::Invalid(Some(e.to_string())))
+ } else {
+ Ok(ValidationResult::Incomplete)
+ }
+ }
+ }
+ }
+}
+
+impl Hinter for RustylineHelper {
+ type Hint = String;
+
+ fn hint(&self, line: &str, pos: usize, ctx: &rustyline::Context<'_>) -> Option<Self::Hint> {
+ self.hinter.hint(line, pos, ctx)
+ }
+}
+
+impl Highlighter for RustylineHelper {
+ fn highlight<'l>(&self, line: &'l str, pos: usize) -> Cow<'l, str> {
+ self.highlighter.highlight(line, pos)
+ }
+
+ fn highlight_prompt<'b, 's: 'b, 'p: 'b>(
+ &'s self,
+ prompt: &'p str,
+ default: bool,
+ ) -> Cow<'b, str> {
+ self.highlighter.highlight_prompt(prompt, default)
+ }
+
+ fn highlight_hint<'h>(&self, hint: &'h str) -> Cow<'h, str> {
+ use nu_ansi_term::Style;
+ Cow::Owned(Style::new().dimmed().paint(hint).to_string())
+ }
+
+ fn highlight_candidate<'c>(
+ &self,
+ candidate: &'c str,
+ completion: rustyline::CompletionType,
+ ) -> Cow<'c, str> {
+ self.highlighter.highlight_candidate(candidate, completion)
+ }
+
+ fn highlight_char(&self, line: &str, pos: usize) -> bool {
+ self.highlighter.highlight_char(line, pos)
+ }
+}
+
+impl Completer for RustylineHelper {
+ type Candidate = String;
+
+ fn complete(
+ &self,
+ line: &str,
+ pos: usize,
+ ctx: &rustyline::Context<'_>,
+ ) -> rustyline::Result<(usize, Vec<Self::Candidate>)> {
+ // If there is a hint, use it as the auto-completion when the user hits `tab`
+ if let Some(hint) = self.hinter.hint(line, pos, ctx) {
+ Ok((pos, vec![hint]))
+ } else {
+ Ok((0, vec![]))
+ }
+ }
+}
diff --git a/src/cmd/src/cli/repl.rs b/src/cmd/src/cli/repl.rs
new file mode 100644
index 000000000000..ae0e4d0c4602
--- /dev/null
+++ b/src/cmd/src/cli/repl.rs
@@ -0,0 +1,199 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::path::PathBuf;
+use std::time::Instant;
+
+use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use common_error::prelude::ErrorExt;
+use common_query::Output;
+use common_recordbatch::RecordBatches;
+use common_telemetry::logging;
+use either::Either;
+use rustyline::error::ReadlineError;
+use rustyline::Editor;
+use snafu::{ErrorCompat, ResultExt};
+
+use crate::cli::cmd::ReplCommand;
+use crate::cli::helper::RustylineHelper;
+use crate::cli::AttachCommand;
+use crate::error::{
+ CollectRecordBatchesSnafu, PrettyPrintRecordBatchesSnafu, ReadlineSnafu, ReplCreationSnafu,
+ RequestDatabaseSnafu, Result,
+};
+
+/// Captures the state of the repl, gathers commands and executes them one by one
+pub(crate) struct Repl {
+ /// Rustyline editor for interacting with user on command line
+ rl: Editor<RustylineHelper>,
+
+ /// Current prompt
+ prompt: String,
+
+ /// Client for interacting with GreptimeDB
+ database: Database,
+}
+
+#[allow(clippy::print_stdout)]
+impl Repl {
+ fn print_help(&self) {
+ println!("{}", ReplCommand::help())
+ }
+
+ pub(crate) fn try_new(cmd: &AttachCommand) -> Result<Self> {
+ let mut rl = Editor::new().context(ReplCreationSnafu)?;
+
+ if !cmd.disable_helper {
+ rl.set_helper(Some(RustylineHelper::default()));
+
+ let history_file = history_file();
+ if let Err(e) = rl.load_history(&history_file) {
+ logging::debug!(
+ "failed to load history file on {}, error: {e}",
+ history_file.display()
+ );
+ }
+ }
+
+ let client = Client::with_urls([&cmd.grpc_addr]);
+ let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
+
+ Ok(Self {
+ rl,
+ prompt: "> ".to_string(),
+ database,
+ })
+ }
+
+ /// Parse the next command
+ fn next_command(&mut self) -> Result<ReplCommand> {
+ match self.rl.readline(&self.prompt) {
+ Ok(ref line) => {
+ let request = line.trim();
+
+ self.rl.add_history_entry(request.to_string());
+
+ request.try_into()
+ }
+ Err(ReadlineError::Eof) | Err(ReadlineError::Interrupted) => Ok(ReplCommand::Exit),
+ // Some sort of real underlying error
+ Err(e) => Err(e).context(ReadlineSnafu),
+ }
+ }
+
+ /// Read Evaluate Print Loop (interactive command line) for GreptimeDB
+ ///
+ /// Inspired / based on repl.rs from InfluxDB IOX
+ pub(crate) async fn run(&mut self) -> Result<()> {
+ println!("Ready for commands. (Hint: try 'help')");
+
+ loop {
+ match self.next_command()? {
+ ReplCommand::Help => {
+ self.print_help();
+ }
+ ReplCommand::UseDatabase { db_name } => {
+ if self.execute_sql(format!("USE {db_name}")).await {
+ println!("Using {db_name}");
+ self.database.set_schema(&db_name);
+ self.prompt = format!("[{db_name}] > ");
+ }
+ }
+ ReplCommand::Sql { sql } => {
+ self.execute_sql(sql).await;
+ }
+ ReplCommand::Exit => {
+ return Ok(());
+ }
+ }
+ }
+ }
+
+ async fn execute_sql(&self, sql: String) -> bool {
+ self.do_execute_sql(sql)
+ .await
+ .map_err(|e| {
+ let status_code = e.status_code();
+ let root_cause = e.iter_chain().last().unwrap();
+ println!("Error: {}({status_code}), {root_cause}", status_code as u32)
+ })
+ .is_ok()
+ }
+
+ async fn do_execute_sql(&self, sql: String) -> Result<()> {
+ let start = Instant::now();
+
+ let output = self
+ .database
+ .sql(&sql)
+ .await
+ .context(RequestDatabaseSnafu { sql: &sql })?;
+
+ let either = match output {
+ Output::Stream(s) => {
+ let x = RecordBatches::try_collect(s)
+ .await
+ .context(CollectRecordBatchesSnafu)?;
+ Either::Left(x)
+ }
+ Output::RecordBatches(x) => Either::Left(x),
+ Output::AffectedRows(rows) => Either::Right(rows),
+ };
+
+ let end = Instant::now();
+
+ match either {
+ Either::Left(recordbatches) => {
+ let total_rows: usize = recordbatches.iter().map(|x| x.num_rows()).sum();
+ if total_rows > 0 {
+ println!(
+ "{}",
+ recordbatches
+ .pretty_print()
+ .context(PrettyPrintRecordBatchesSnafu)?
+ );
+ }
+ println!("Total Rows: {total_rows}")
+ }
+ Either::Right(rows) => println!("Affected Rows: {rows}"),
+ };
+
+ println!("Cost {} ms", (end - start).as_millis());
+ Ok(())
+ }
+}
+
+impl Drop for Repl {
+ fn drop(&mut self) {
+ if self.rl.helper().is_some() {
+ let history_file = history_file();
+ if let Err(e) = self.rl.save_history(&history_file) {
+ logging::debug!(
+ "failed to save history file on {}, error: {e}",
+ history_file.display()
+ );
+ }
+ }
+ }
+}
+
+/// Return the location of the history file (defaults to $HOME/".greptimedb_cli_history")
+fn history_file() -> PathBuf {
+ let mut buf = match std::env::var("HOME") {
+ Ok(home) => PathBuf::from(home),
+ Err(_) => PathBuf::new(),
+ };
+ buf.push(".greptimedb_cli_history");
+ buf
+}
diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs
index fd2eb7d1ae92..209f41b1a111 100644
--- a/src/cmd/src/error.rs
+++ b/src/cmd/src/error.rs
@@ -15,6 +15,7 @@
use std::any::Any;
use common_error::prelude::*;
+use rustyline::error::ReadlineError;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -68,6 +69,40 @@ pub enum Error {
#[snafu(backtrace)]
source: meta_srv::error::Error,
},
+
+ #[snafu(display("Invalid REPL command: {reason}"))]
+ InvalidReplCommand { reason: String },
+
+ #[snafu(display("Cannot create REPL: {}", source))]
+ ReplCreation {
+ source: ReadlineError,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Error reading command: {}", source))]
+ Readline {
+ source: ReadlineError,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Failed to request database, sql: {sql}, source: {source}"))]
+ RequestDatabase {
+ sql: String,
+ #[snafu(backtrace)]
+ source: client::Error,
+ },
+
+ #[snafu(display("Failed to collect RecordBatches, source: {source}"))]
+ CollectRecordBatches {
+ #[snafu(backtrace)]
+ source: common_recordbatch::error::Error,
+ },
+
+ #[snafu(display("Failed to pretty print Recordbatches, source: {source}"))]
+ PrettyPrintRecordBatches {
+ #[snafu(backtrace)]
+ source: common_recordbatch::error::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -82,8 +117,15 @@ impl ErrorExt for Error {
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
StatusCode::InvalidArguments
}
- Error::IllegalConfig { .. } => StatusCode::InvalidArguments,
+ Error::IllegalConfig { .. } | Error::InvalidReplCommand { .. } => {
+ StatusCode::InvalidArguments
+ }
Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
+ Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
+ Error::RequestDatabase { source, .. } => source.status_code(),
+ Error::CollectRecordBatches { source } | Error::PrettyPrintRecordBatches { source } => {
+ source.status_code()
+ }
}
}
diff --git a/src/cmd/src/lib.rs b/src/cmd/src/lib.rs
index 61d694e4ae48..157e4853f1cc 100644
--- a/src/cmd/src/lib.rs
+++ b/src/cmd/src/lib.rs
@@ -14,6 +14,7 @@
#![feature(assert_matches)]
+pub mod cli;
pub mod datanode;
pub mod error;
pub mod frontend;
diff --git a/src/cmd/tests/cli.rs b/src/cmd/tests/cli.rs
new file mode 100644
index 000000000000..905191e13bdb
--- /dev/null
+++ b/src/cmd/tests/cli.rs
@@ -0,0 +1,145 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[cfg(target_os = "macos")]
+mod tests {
+ use std::path::PathBuf;
+ use std::process::{Command, Stdio};
+ use std::time::Duration;
+
+ use rexpect::session::PtyReplSession;
+ use tempdir::TempDir;
+
+ struct Repl {
+ repl: PtyReplSession,
+ }
+
+ impl Repl {
+ fn send_line(&mut self, line: &str) {
+ self.repl.send_line(line).unwrap();
+
+ // read a line to consume the prompt
+ self.read_line();
+ }
+
+ fn read_line(&mut self) -> String {
+ self.repl.read_line().unwrap()
+ }
+
+ fn read_expect(&mut self, expect: &str) {
+ assert_eq!(self.read_line(), expect);
+ }
+
+ fn read_contains(&mut self, pat: &str) {
+ assert!(self.read_line().contains(pat));
+ }
+ }
+
+ #[test]
+ fn test_repl() {
+ let data_dir = TempDir::new_in("/tmp", "data").unwrap();
+ let wal_dir = TempDir::new_in("/tmp", "wal").unwrap();
+
+ let mut bin_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+ bin_path.push("../../target/debug");
+ let bin_path = bin_path.to_str().unwrap();
+
+ let mut datanode = Command::new("./greptime")
+ .current_dir(bin_path)
+ .args([
+ "datanode",
+ "start",
+ "--rpc-addr=0.0.0.0:4321",
+ "--node-id=1",
+ &format!("--data-dir={}", data_dir.path().display()),
+ &format!("--wal-dir={}", wal_dir.path().display()),
+ ])
+ .stdout(Stdio::null())
+ .spawn()
+ .unwrap();
+
+ // wait for the Datanode to actually start
+ std::thread::sleep(Duration::from_secs(3));
+
+ let mut repl_cmd = Command::new("./greptime");
+ repl_cmd.current_dir(bin_path).args([
+ "--log-level=off",
+ "cli",
+ "attach",
+ "--grpc-addr=0.0.0.0:4321",
+ // history commands can sneak into stdout and mess up our tests, so disable it
+ "--disable-helper",
+ ]);
+ let pty_session = rexpect::session::spawn_command(repl_cmd, Some(5_000)).unwrap();
+ let repl = PtyReplSession {
+ prompt: "> ".to_string(),
+ pty_session,
+ quit_command: None,
+ echo_on: false,
+ };
+ let repl = &mut Repl { repl };
+ repl.read_expect("Ready for commands. (Hint: try 'help')");
+
+ test_create_database(repl);
+
+ test_use_database(repl);
+
+ test_create_table(repl);
+
+ test_insert(repl);
+
+ test_select(repl);
+
+ datanode.kill().unwrap();
+ datanode.wait().unwrap();
+ }
+
+ fn test_create_database(repl: &mut Repl) {
+ repl.send_line("CREATE DATABASE db;");
+ repl.read_expect("Affected Rows: 1");
+ repl.read_contains("Cost");
+ }
+
+ fn test_use_database(repl: &mut Repl) {
+ repl.send_line("USE db");
+ repl.read_expect("Total Rows: 0");
+ repl.read_contains("Cost");
+ repl.read_expect("Using db");
+ }
+
+ fn test_create_table(repl: &mut Repl) {
+ repl.send_line("CREATE TABLE t(x STRING, ts TIMESTAMP TIME INDEX);");
+ repl.read_expect("Affected Rows: 0");
+ repl.read_contains("Cost");
+ }
+
+ fn test_insert(repl: &mut Repl) {
+ repl.send_line("INSERT INTO t(x, ts) VALUES ('hello', 1676895812239);");
+ repl.read_expect("Affected Rows: 1");
+ repl.read_contains("Cost");
+ }
+
+ fn test_select(repl: &mut Repl) {
+ repl.send_line("SELECT * FROM t;");
+
+ repl.read_expect("+-------+-------------------------+");
+ repl.read_expect("| x | ts |");
+ repl.read_expect("+-------+-------------------------+");
+ repl.read_expect("| hello | 2023-02-20T12:23:32.239 |");
+ repl.read_expect("+-------+-------------------------+");
+ repl.read_expect("Total Rows: 1");
+
+ repl.read_contains("Cost");
+ }
+}
|
feat
|
a simple REPL for debugging purpose (#1048)
|
9bc4c0d9c7b4c8019966a2f38cf5c6afbed66bce
|
2023-01-20 13:45:16
|
Ning Sun
|
fix: mysql tests error (#897)
| false
|
diff --git a/src/servers/tests/mysql/mysql_server_test.rs b/src/servers/tests/mysql/mysql_server_test.rs
index 9ec9f1caf760..97323c366af8 100644
--- a/src/servers/tests/mysql/mysql_server_test.rs
+++ b/src/servers/tests/mysql/mysql_server_test.rs
@@ -230,7 +230,7 @@ async fn test_server_required_secure_client_plain() -> Result<()> {
let recordbatch = RecordBatch::new(schema, columns).unwrap();
let table = MemTable::new("all_datatypes", recordbatch);
- let mysql_server = create_mysql_server(table, server_tls)?;
+ let mysql_server = create_mysql_server(table, server_tls, None)?;
let listening = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
let server_addr = mysql_server.start(listening).await.unwrap();
|
fix
|
mysql tests error (#897)
|
d7a906e0bd81ff2b786fd6e88bb703afe67a3e22
|
2023-05-04 13:12:43
|
JeremyHi
|
feat: metasrv mailbox (#1481)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index e3dd55c39e5f..ecf7dd9eedc7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3821,7 +3821,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=a26c40c004f998180b8acd853b22f083773f36b9#a26c40c004f998180b8acd853b22f083773f36b9"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=e8abf8241c908448dce595399e89c89a40d048bd#e8abf8241c908448dce595399e89c89a40d048bd"
dependencies = [
"prost",
"tonic 0.9.2",
diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml
index 1f6163b99ffd..b4b0523959cd 100644
--- a/src/api/Cargo.toml
+++ b/src/api/Cargo.toml
@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a26c40c004f998180b8acd853b22f083773f36b9" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "e8abf8241c908448dce595399e89c89a40d048bd" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index ec580fd5a148..9bc7be2c32d9 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -291,6 +291,28 @@ pub enum Error {
schema_name: String,
location: Location,
},
+
+ #[snafu(display("Pusher not found: {pusher_id}"))]
+ PusherNotFound {
+ pusher_id: String,
+ location: Location,
+ },
+
+ #[snafu(display("Failed to push message: {err_msg}"))]
+ PushMessage { err_msg: String, location: Location },
+
+ #[snafu(display("Mailbox already closed: {id}"))]
+ MailboxClosed { id: u64, location: Location },
+
+ #[snafu(display("Mailbox timeout: {id}"))]
+ MailboxTimeout { id: u64, location: Location },
+
+ #[snafu(display("Mailbox receiver got an error: {id}, {err_msg}"))]
+ MailboxReceiver {
+ id: u64,
+ err_msg: String,
+ location: Location,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -332,6 +354,11 @@ impl ErrorExt for Error {
| Error::SendShutdownSignal { .. }
| Error::ParseAddr { .. }
| Error::SchemaAlreadyExists { .. }
+ | Error::PusherNotFound { .. }
+ | Error::PushMessage { .. }
+ | Error::MailboxClosed { .. }
+ | Error::MailboxTimeout { .. }
+ | Error::MailboxReceiver { .. }
| Error::StartGrpc { .. } => StatusCode::Internal,
Error::EmptyKey { .. }
| Error::MissingRequiredParameter { .. }
diff --git a/src/meta-srv/src/handler.rs b/src/meta-srv/src/handler.rs
index d878d9103f21..685d059ec6fa 100644
--- a/src/meta-srv/src/handler.rs
+++ b/src/meta-srv/src/handler.rs
@@ -12,39 +12,49 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::BTreeMap;
+use std::sync::Arc;
+use std::time::Duration;
+
+use api::v1::meta::{
+ HeartbeatRequest, HeartbeatResponse, MailboxMessage, RequestHeader, ResponseHeader, Role,
+ PROTOCOL_VERSION,
+};
pub use check_leader_handler::CheckLeaderHandler;
pub use collect_stats_handler::CollectStatsHandler;
+use common_telemetry::{info, warn};
+use dashmap::DashMap;
pub use failure_handler::RegionFailureHandler;
pub use keep_lease_handler::KeepLeaseHandler;
pub use on_leader_start::OnLeaderStartHandler;
pub use persist_stats_handler::PersistStatsHandler;
pub use response_header_handler::ResponseHeaderHandler;
+use snafu::OptionExt;
+use tokio::sync::mpsc::Sender;
+use tokio::sync::{oneshot, Notify, RwLock};
+
+use self::instruction::Instruction;
+use self::node_stat::Stat;
+use crate::error::{self, Result};
+use crate::metasrv::Context;
+use crate::sequence::Sequence;
+use crate::service::mailbox::{Channel, Mailbox, MailboxReceiver, MailboxRef, MessageId};
mod check_leader_handler;
mod collect_stats_handler;
mod failure_handler;
mod instruction;
mod keep_lease_handler;
+pub mod mailbox_handler;
pub mod node_stat;
mod on_leader_start;
mod persist_stats_handler;
mod response_header_handler;
-use std::collections::BTreeMap;
-use std::sync::Arc;
-
-use api::v1::meta::{HeartbeatRequest, HeartbeatResponse, ResponseHeader};
-use common_telemetry::info;
-use tokio::sync::mpsc::Sender;
-use tokio::sync::RwLock;
-
-use self::instruction::Instruction;
-use self::node_stat::Stat;
-use crate::error::Result;
-use crate::metasrv::Context;
-
#[async_trait::async_trait]
pub trait HeartbeatHandler: Send + Sync {
+ fn is_acceptable(&self, role: Role) -> bool;
+
async fn handle(
&self,
req: &HeartbeatRequest,
@@ -61,13 +71,46 @@ pub struct HeartbeatAccumulator {
}
impl HeartbeatAccumulator {
- pub fn into_payload(self) -> Vec<Vec<u8>> {
+ pub fn into_mailbox_messages(self) -> Vec<MailboxMessage> {
// TODO(jiachun): to HeartbeatResponse payload
vec![]
}
}
-pub type Pusher = Sender<std::result::Result<HeartbeatResponse, tonic::Status>>;
+pub struct Pusher {
+ sender: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>,
+ res_header: ResponseHeader,
+}
+
+impl Pusher {
+ pub fn new(
+ sender: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>,
+ req_header: &Option<RequestHeader>,
+ ) -> Self {
+ let res_header = ResponseHeader {
+ protocol_version: PROTOCOL_VERSION,
+ cluster_id: req_header.as_ref().map_or(0, |h| h.cluster_id),
+ ..Default::default()
+ };
+
+ Self { sender, res_header }
+ }
+
+ #[inline]
+ pub async fn push(&self, res: HeartbeatResponse) -> Result<()> {
+ self.sender.send(Ok(res)).await.map_err(|e| {
+ error::PushMessageSnafu {
+ err_msg: e.to_string(),
+ }
+ .build()
+ })
+ }
+
+ #[inline]
+ pub fn header(&self) -> ResponseHeader {
+ self.res_header.clone()
+ }
+}
#[derive(Clone, Default)]
pub struct HeartbeatHandlerGroup {
@@ -95,6 +138,10 @@ impl HeartbeatHandlerGroup {
pushers.remove(key)
}
+ pub fn pushers(&self) -> Arc<RwLock<BTreeMap<String, Pusher>>> {
+ self.pushers.clone()
+ }
+
pub async fn handle(
&self,
req: HeartbeatRequest,
@@ -103,13 +150,231 @@ impl HeartbeatHandlerGroup {
let mut acc = HeartbeatAccumulator::default();
let handlers = self.handlers.read().await;
for h in handlers.iter() {
- h.handle(&req, &mut ctx, &mut acc).await?;
+ if ctx.is_skip_all() {
+ break;
+ }
+
+ let role = req
+ .header
+ .as_ref()
+ .and_then(|h| Role::from_i32(h.role))
+ .context(error::InvalidArgumentsSnafu {
+ err_msg: format!("invalid role: {:?}", req.header),
+ })?;
+
+ if h.is_acceptable(role) {
+ h.handle(&req, &mut ctx, &mut acc).await?;
+ }
}
let header = std::mem::take(&mut acc.header);
let res = HeartbeatResponse {
header,
- payload: acc.into_payload(),
+ mailbox_messages: acc.into_mailbox_messages(),
};
Ok(res)
}
}
+
+pub struct HeartbeatMailbox {
+ pushers: Arc<RwLock<BTreeMap<String, Pusher>>>,
+ sequence: Sequence,
+ senders: DashMap<MessageId, oneshot::Sender<Result<MailboxMessage>>>,
+ timeouts: DashMap<MessageId, Duration>,
+ timeout_notify: Notify,
+}
+
+impl HeartbeatMailbox {
+ pub fn create(
+ pushers: Arc<RwLock<BTreeMap<String, Pusher>>>,
+ sequence: Sequence,
+ ) -> MailboxRef {
+ let mailbox = Arc::new(Self::new(pushers, sequence));
+
+ let timeout_checker = mailbox.clone();
+ common_runtime::spawn_bg(async move {
+ timeout_checker.check_timeout_bg(10).await;
+ });
+
+ mailbox
+ }
+
+ fn new(pushers: Arc<RwLock<BTreeMap<String, Pusher>>>, sequence: Sequence) -> Self {
+ Self {
+ pushers,
+ sequence,
+ senders: DashMap::default(),
+ timeouts: DashMap::default(),
+ timeout_notify: Notify::new(),
+ }
+ }
+
+ async fn check_timeout_bg(&self, interval_millis: u64) {
+ let mut interval = tokio::time::interval(Duration::from_millis(interval_millis));
+
+ loop {
+ interval.tick().await;
+
+ if self.timeouts.is_empty() {
+ self.timeout_notify.notified().await;
+ }
+
+ let now = Duration::from_millis(common_time::util::current_time_millis() as u64);
+ let timeout_ids = self
+ .timeouts
+ .iter()
+ .filter_map(|entry| {
+ let (id, deadline) = entry.pair();
+ if deadline < &now {
+ Some(*id)
+ } else {
+ None
+ }
+ })
+ .collect::<Vec<_>>();
+
+ for id in timeout_ids {
+ let _ = self
+ .on_recv(id, Err(error::MailboxTimeoutSnafu { id }.build()))
+ .await;
+ }
+ }
+ }
+}
+
+#[async_trait::async_trait]
+impl Mailbox for HeartbeatMailbox {
+ async fn send(
+ &self,
+ ch: &Channel,
+ mut msg: MailboxMessage,
+ timeout: Duration,
+ ) -> Result<MailboxReceiver> {
+ let message_id = self.sequence.next().await?;
+
+ let pusher_id = match ch {
+ Channel::Datanode(id) => format!("{}-{}", Role::Datanode as i32, id),
+ Channel::Frontend(id) => format!("{}-{}", Role::Frontend as i32, id),
+ };
+ let pushers = self.pushers.read().await;
+ let pusher = pushers
+ .get(&pusher_id)
+ .context(error::PusherNotFoundSnafu { pusher_id })?;
+
+ let (tx, rx) = oneshot::channel();
+ self.senders.insert(message_id, tx);
+ let deadline =
+ Duration::from_millis(common_time::util::current_time_millis() as u64) + timeout;
+ self.timeouts.insert(message_id, deadline);
+ self.timeout_notify.notify_one();
+
+ let header = pusher.header();
+ msg.id = message_id;
+ let res = HeartbeatResponse {
+ header: Some(header),
+ mailbox_messages: vec![msg],
+ };
+
+ pusher.push(res).await?;
+
+ Ok(MailboxReceiver::new(message_id, rx))
+ }
+
+ async fn on_recv(&self, id: MessageId, maybe_msg: Result<MailboxMessage>) -> Result<()> {
+ self.timeouts.remove(&id);
+
+ if let Some((_, tx)) = self.senders.remove(&id) {
+ tx.send(maybe_msg)
+ .map_err(|_| error::MailboxClosedSnafu { id }.build())?;
+ } else if let Ok(finally_msg) = maybe_msg {
+ let MailboxMessage {
+ id,
+ subject,
+ from,
+ to,
+ timestamp_millis,
+ ..
+ } = finally_msg;
+ warn!("The response arrived too late, id={id}, subject={subject}, from={from}, to={to}, timestamp={timestamp_millis}");
+ }
+
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+ use std::time::Duration;
+
+ use api::v1::meta::{MailboxMessage, RequestHeader, Role, PROTOCOL_VERSION};
+ use tokio::sync::mpsc;
+
+ use crate::handler::{HeartbeatHandlerGroup, HeartbeatMailbox, Pusher};
+ use crate::sequence::Sequence;
+ use crate::service::mailbox::{Channel, MailboxReceiver, MailboxRef};
+ use crate::service::store::memory::MemStore;
+
+ #[tokio::test]
+ async fn test_mailbox() {
+ let (mailbox, receiver) = push_msg_via_mailbox().await;
+ let id = receiver.message_id();
+
+ let resp_msg = MailboxMessage {
+ id,
+ subject: "resp-test".to_string(),
+ timestamp_millis: 456,
+ ..Default::default()
+ };
+
+ mailbox.on_recv(id, Ok(resp_msg)).await.unwrap();
+
+ let recv_msg = receiver.await.unwrap().unwrap();
+ assert_eq!(recv_msg.id, id);
+ assert_eq!(recv_msg.timestamp_millis, 456);
+ assert_eq!(recv_msg.subject, "resp-test".to_string());
+ }
+
+ #[tokio::test]
+ async fn test_mailbox_timeout() {
+ let (_, receiver) = push_msg_via_mailbox().await;
+ let res = receiver.await.unwrap();
+ assert!(res.is_err());
+ }
+
+ async fn push_msg_via_mailbox() -> (MailboxRef, MailboxReceiver) {
+ let datanode_id = 12;
+ let (pusher_tx, mut pusher_rx) = mpsc::channel(16);
+ let res_header = RequestHeader {
+ protocol_version: PROTOCOL_VERSION,
+ ..Default::default()
+ };
+ let pusher: Pusher = Pusher::new(pusher_tx, &Option::from(res_header));
+ let handler_group = HeartbeatHandlerGroup::default();
+ handler_group
+ .register(format!("{}-{}", Role::Datanode as i32, datanode_id), pusher)
+ .await;
+
+ let kv_store = Arc::new(MemStore::new());
+ let seq = Sequence::new("test_seq", 0, 10, kv_store);
+ let mailbox = HeartbeatMailbox::create(handler_group.pushers(), seq);
+
+ let msg = MailboxMessage {
+ id: 0,
+ subject: "req-test".to_string(),
+ timestamp_millis: 123,
+ ..Default::default()
+ };
+ let ch = Channel::Datanode(datanode_id);
+
+ let receiver = mailbox
+ .send(&ch, msg, Duration::from_secs(1))
+ .await
+ .unwrap();
+
+ let recv_obj = pusher_rx.recv().await.unwrap().unwrap();
+ assert_eq!(recv_obj.mailbox_messages[0].timestamp_millis, 123);
+ assert_eq!(recv_obj.mailbox_messages[0].subject, "req-test".to_string());
+
+ (mailbox, receiver)
+ }
+}
diff --git a/src/meta-srv/src/handler/check_leader_handler.rs b/src/meta-srv/src/handler/check_leader_handler.rs
index c30f41fb18d3..2d83b5e8f1c3 100644
--- a/src/meta-srv/src/handler/check_leader_handler.rs
+++ b/src/meta-srv/src/handler/check_leader_handler.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use api::v1::meta::{Error, HeartbeatRequest};
+use api::v1::meta::{Error, HeartbeatRequest, Role};
use crate::error::Result;
use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
@@ -23,6 +23,10 @@ pub struct CheckLeaderHandler;
#[async_trait::async_trait]
impl HeartbeatHandler for CheckLeaderHandler {
+ fn is_acceptable(&self, role: Role) -> bool {
+ role == Role::Datanode
+ }
+
async fn handle(
&self,
_req: &HeartbeatRequest,
diff --git a/src/meta-srv/src/handler/collect_stats_handler.rs b/src/meta-srv/src/handler/collect_stats_handler.rs
index a1274fa5b744..5fc38379e1c7 100644
--- a/src/meta-srv/src/handler/collect_stats_handler.rs
+++ b/src/meta-srv/src/handler/collect_stats_handler.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use api::v1::meta::HeartbeatRequest;
+use api::v1::meta::{HeartbeatRequest, Role};
use common_telemetry::debug;
use super::node_stat::Stat;
@@ -24,16 +24,16 @@ pub struct CollectStatsHandler;
#[async_trait::async_trait]
impl HeartbeatHandler for CollectStatsHandler {
+ fn is_acceptable(&self, role: Role) -> bool {
+ role == Role::Datanode
+ }
+
async fn handle(
&self,
req: &HeartbeatRequest,
- ctx: &mut Context,
+ _ctx: &mut Context,
acc: &mut HeartbeatAccumulator,
) -> Result<()> {
- if ctx.is_skip_all() {
- return Ok(());
- }
-
match Stat::try_from(req.clone()) {
Ok(stat) => {
let _ = acc.stat.insert(stat);
diff --git a/src/meta-srv/src/handler/failure_handler.rs b/src/meta-srv/src/handler/failure_handler.rs
index 2593be500b6f..67b05ad62a8b 100644
--- a/src/meta-srv/src/handler/failure_handler.rs
+++ b/src/meta-srv/src/handler/failure_handler.rs
@@ -14,7 +14,7 @@
mod runner;
-use api::v1::meta::HeartbeatRequest;
+use api::v1::meta::{HeartbeatRequest, Role};
use async_trait::async_trait;
use crate::error::Result;
@@ -58,6 +58,10 @@ impl RegionFailureHandler {
#[async_trait]
impl HeartbeatHandler for RegionFailureHandler {
+ fn is_acceptable(&self, role: Role) -> bool {
+ role == Role::Datanode
+ }
+
async fn handle(
&self,
_: &HeartbeatRequest,
@@ -70,10 +74,6 @@ impl HeartbeatHandler for RegionFailureHandler {
.await;
}
- if ctx.is_skip_all() {
- return Ok(());
- }
-
let Some(stat) = acc.stat.as_ref() else { return Ok(()) };
let heartbeat = DatanodeHeartbeat {
diff --git a/src/meta-srv/src/handler/keep_lease_handler.rs b/src/meta-srv/src/handler/keep_lease_handler.rs
index c709330f70bc..24fa22134b60 100644
--- a/src/meta-srv/src/handler/keep_lease_handler.rs
+++ b/src/meta-srv/src/handler/keep_lease_handler.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use api::v1::meta::{BatchPutRequest, HeartbeatRequest, KeyValue};
+use api::v1::meta::{BatchPutRequest, HeartbeatRequest, KeyValue, Role};
use common_telemetry::{trace, warn};
use common_time::util as time_util;
use tokio::sync::mpsc::{self, Sender};
@@ -55,16 +55,16 @@ impl KeepLeaseHandler {
#[async_trait::async_trait]
impl HeartbeatHandler for KeepLeaseHandler {
+ fn is_acceptable(&self, role: Role) -> bool {
+ role == Role::Datanode
+ }
+
async fn handle(
&self,
req: &HeartbeatRequest,
- ctx: &mut Context,
+ _ctx: &mut Context,
_acc: &mut HeartbeatAccumulator,
) -> Result<()> {
- if ctx.is_skip_all() {
- return Ok(());
- }
-
let HeartbeatRequest { header, peer, .. } = req;
if let Some(peer) = &peer {
let key = LeaseKey {
diff --git a/src/meta-srv/src/handler/mailbox_handler.rs b/src/meta-srv/src/handler/mailbox_handler.rs
new file mode 100644
index 000000000000..6bcc0ff4c858
--- /dev/null
+++ b/src/meta-srv/src/handler/mailbox_handler.rs
@@ -0,0 +1,47 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::meta::{HeartbeatRequest, Role};
+
+use crate::error::Result;
+use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
+use crate::metasrv::Context;
+
+#[derive(Default)]
+pub struct MailboxHandler;
+
+#[async_trait::async_trait]
+impl HeartbeatHandler for MailboxHandler {
+ fn is_acceptable(&self, _role: Role) -> bool {
+ true
+ }
+
+ async fn handle(
+ &self,
+ req: &HeartbeatRequest,
+ ctx: &mut Context,
+ _acc: &mut HeartbeatAccumulator,
+ ) -> Result<()> {
+ if req.mailbox_messages.is_empty() {
+ return Ok(());
+ }
+
+ let mailbox_messages = req.mailbox_messages.clone();
+ for msg in mailbox_messages {
+ ctx.mailbox.on_recv(msg.id, Ok(msg)).await?;
+ }
+
+ Ok(())
+ }
+}
diff --git a/src/meta-srv/src/handler/on_leader_start.rs b/src/meta-srv/src/handler/on_leader_start.rs
index 163be19a35c2..c65ff412b6dc 100644
--- a/src/meta-srv/src/handler/on_leader_start.rs
+++ b/src/meta-srv/src/handler/on_leader_start.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use api::v1::meta::HeartbeatRequest;
+use api::v1::meta::{HeartbeatRequest, Role};
use crate::error::Result;
use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
@@ -23,6 +23,10 @@ pub struct OnLeaderStartHandler;
#[async_trait::async_trait]
impl HeartbeatHandler for OnLeaderStartHandler {
+ fn is_acceptable(&self, role: Role) -> bool {
+ role == Role::Datanode
+ }
+
async fn handle(
&self,
_req: &HeartbeatRequest,
diff --git a/src/meta-srv/src/handler/persist_stats_handler.rs b/src/meta-srv/src/handler/persist_stats_handler.rs
index 385f8c8fe652..d7395e15a142 100644
--- a/src/meta-srv/src/handler/persist_stats_handler.rs
+++ b/src/meta-srv/src/handler/persist_stats_handler.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use api::v1::meta::{HeartbeatRequest, PutRequest};
+use api::v1::meta::{HeartbeatRequest, PutRequest, Role};
use dashmap::DashMap;
use crate::error::Result;
@@ -30,16 +30,16 @@ pub struct PersistStatsHandler {
#[async_trait::async_trait]
impl HeartbeatHandler for PersistStatsHandler {
+ fn is_acceptable(&self, role: Role) -> bool {
+ role == Role::Datanode
+ }
+
async fn handle(
&self,
_req: &HeartbeatRequest,
ctx: &mut Context,
acc: &mut HeartbeatAccumulator,
) -> Result<()> {
- if ctx.is_skip_all() {
- return Ok(());
- }
-
let Some(stat) = acc.stat.take() else { return Ok(()) };
let key = stat.stat_key();
@@ -78,18 +78,23 @@ mod tests {
use api::v1::meta::RangeRequest;
use super::*;
+ use crate::handler::HeartbeatMailbox;
use crate::keys::StatKey;
+ use crate::sequence::Sequence;
use crate::service::store::memory::MemStore;
#[tokio::test]
async fn test_handle_datanode_stats() {
let in_memory = Arc::new(MemStore::new());
let kv_store = Arc::new(MemStore::new());
+ let seq = Sequence::new("test_seq", 0, 10, kv_store.clone());
+ let mailbox = HeartbeatMailbox::create(Arc::new(Default::default()), seq);
let mut ctx = Context {
datanode_lease_secs: 30,
server_addr: "127.0.0.1:0000".to_string(),
in_memory,
kv_store,
+ mailbox,
election: None,
skip_all: Arc::new(AtomicBool::new(false)),
catalog: None,
diff --git a/src/meta-srv/src/handler/response_header_handler.rs b/src/meta-srv/src/handler/response_header_handler.rs
index e040d1e87e04..733c0861298c 100644
--- a/src/meta-srv/src/handler/response_header_handler.rs
+++ b/src/meta-srv/src/handler/response_header_handler.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use api::v1::meta::{HeartbeatRequest, ResponseHeader, PROTOCOL_VERSION};
+use api::v1::meta::{HeartbeatRequest, ResponseHeader, Role, PROTOCOL_VERSION};
use crate::error::Result;
use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
@@ -23,6 +23,10 @@ pub struct ResponseHeaderHandler;
#[async_trait::async_trait]
impl HeartbeatHandler for ResponseHeaderHandler {
+ fn is_acceptable(&self, role: Role) -> bool {
+ role == Role::Datanode
+ }
+
async fn handle(
&self,
req: &HeartbeatRequest,
@@ -48,18 +52,22 @@ mod tests {
use api::v1::meta::{HeartbeatResponse, RequestHeader};
use super::*;
- use crate::handler::Context;
+ use crate::handler::{Context, HeartbeatMailbox};
+ use crate::sequence::Sequence;
use crate::service::store::memory::MemStore;
#[tokio::test]
async fn test_handle_heartbeat_resp_header() {
let in_memory = Arc::new(MemStore::new());
let kv_store = Arc::new(MemStore::new());
+ let seq = Sequence::new("test_seq", 0, 10, kv_store.clone());
+ let mailbox = HeartbeatMailbox::create(Arc::new(Default::default()), seq);
let mut ctx = Context {
datanode_lease_secs: 30,
server_addr: "127.0.0.1:0000".to_string(),
in_memory,
kv_store,
+ mailbox,
election: None,
skip_all: Arc::new(AtomicBool::new(false)),
catalog: None,
@@ -82,7 +90,7 @@ mod tests {
let header = std::mem::take(&mut acc.header);
let res = HeartbeatResponse {
header,
- payload: acc.into_payload(),
+ mailbox_messages: acc.into_mailbox_messages(),
};
assert_eq!(1, res.header.unwrap().cluster_id);
}
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 10b95bee0350..4c9fe18d3f32 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -35,6 +35,7 @@ use crate::lock::DistLockRef;
use crate::metadata_service::MetadataServiceRef;
use crate::selector::{Selector, SelectorType};
use crate::sequence::SequenceRef;
+use crate::service::mailbox::MailboxRef;
use crate::service::store::kv::{KvStoreRef, ResettableKvStoreRef};
pub const TABLE_ID_SEQ: &str = "table_id";
@@ -73,6 +74,7 @@ pub struct Context {
pub server_addr: String,
pub in_memory: ResettableKvStoreRef,
pub kv_store: KvStoreRef,
+ pub mailbox: MailboxRef,
pub election: Option<ElectionRef>,
pub skip_all: Arc<AtomicBool>,
pub catalog: Option<String>,
@@ -116,6 +118,7 @@ pub struct MetaSrv {
lock: Option<DistLockRef>,
procedure_manager: ProcedureManagerRef,
metadata_service: MetadataServiceRef,
+ mailbox: MailboxRef,
}
impl MetaSrv {
@@ -240,12 +243,18 @@ impl MetaSrv {
self.lock.clone()
}
+ #[inline]
+ pub fn mailbox(&self) -> MailboxRef {
+ self.mailbox.clone()
+ }
+
#[inline]
pub fn new_ctx(&self) -> Context {
let datanode_lease_secs = self.options().datanode_lease_secs;
let server_addr = self.options().server_addr.clone();
let in_memory = self.in_memory();
let kv_store = self.kv_store();
+ let mailbox = self.mailbox();
let election = self.election();
let skip_all = Arc::new(AtomicBool::new(false));
Context {
@@ -253,6 +262,7 @@ impl MetaSrv {
server_addr,
in_memory,
kv_store,
+ mailbox,
election,
skip_all,
catalog: None,
diff --git a/src/meta-srv/src/metasrv/builder.rs b/src/meta-srv/src/metasrv/builder.rs
index 48c7c2323330..90f8822115f4 100644
--- a/src/meta-srv/src/metasrv/builder.rs
+++ b/src/meta-srv/src/metasrv/builder.rs
@@ -18,9 +18,11 @@ use std::sync::Arc;
use common_procedure::local::{LocalManager, ManagerConfig};
use crate::cluster::MetaPeerClient;
+use crate::handler::mailbox_handler::MailboxHandler;
use crate::handler::{
- CheckLeaderHandler, CollectStatsHandler, HeartbeatHandlerGroup, KeepLeaseHandler,
- OnLeaderStartHandler, PersistStatsHandler, RegionFailureHandler, ResponseHeaderHandler,
+ CheckLeaderHandler, CollectStatsHandler, HeartbeatHandlerGroup, HeartbeatMailbox,
+ KeepLeaseHandler, OnLeaderStartHandler, PersistStatsHandler, RegionFailureHandler,
+ ResponseHeaderHandler,
};
use crate::lock::DistLockRef;
use crate::metadata_service::{DefaultMetadataService, MetadataServiceRef};
@@ -143,6 +145,7 @@ impl MetaSrvBuilder {
group.add_handler(CheckLeaderHandler::default()).await;
group.add_handler(OnLeaderStartHandler::default()).await;
group.add_handler(CollectStatsHandler).await;
+ group.add_handler(MailboxHandler).await;
group.add_handler(region_failure_handler).await;
group.add_handler(PersistStatsHandler::default()).await;
group
@@ -158,6 +161,9 @@ impl MetaSrvBuilder {
let metadata_service = metadata_service
.unwrap_or_else(|| Arc::new(DefaultMetadataService::new(kv_store.clone())));
+ let mailbox_sequence = Sequence::new("heartbeat_mailbox", 0, 100, kv_store.clone());
+ let mailbox = HeartbeatMailbox::create(handler_group.pushers(), mailbox_sequence);
+
MetaSrv {
started,
options,
@@ -171,6 +177,7 @@ impl MetaSrvBuilder {
lock,
procedure_manager,
metadata_service,
+ mailbox,
}
}
}
diff --git a/src/meta-srv/src/service.rs b/src/meta-srv/src/service.rs
index cb386f82ff8c..70f95fbd2536 100644
--- a/src/meta-srv/src/service.rs
+++ b/src/meta-srv/src/service.rs
@@ -21,6 +21,7 @@ pub mod admin;
pub mod cluster;
mod heartbeat;
pub mod lock;
+pub mod mailbox;
pub mod router;
pub mod store;
diff --git a/src/meta-srv/src/service/heartbeat.rs b/src/meta-srv/src/service/heartbeat.rs
index 83cf5e26ead8..c7e7984975c9 100644
--- a/src/meta-srv/src/service/heartbeat.rs
+++ b/src/meta-srv/src/service/heartbeat.rs
@@ -13,7 +13,6 @@
// limitations under the License.
use std::io::ErrorKind;
-use std::sync::atomic::{AtomicU64, Ordering};
use api::v1::meta::{
heartbeat_server, AskLeaderRequest, AskLeaderResponse, HeartbeatRequest, HeartbeatResponse,
@@ -27,11 +26,10 @@ use tonic::{Request, Response, Streaming};
use crate::error;
use crate::error::Result;
+use crate::handler::Pusher;
use crate::metasrv::{Context, MetaSrv};
use crate::service::{GrpcResult, GrpcStream};
-static PUSHER_ID: AtomicU64 = AtomicU64::new(0);
-
#[async_trait::async_trait]
impl heartbeat_server::Heartbeat for MetaSrv {
type HeartbeatStream = GrpcStream<HeartbeatResponse>;
@@ -50,15 +48,12 @@ impl heartbeat_server::Heartbeat for MetaSrv {
let mut quit = false;
match msg {
Ok(req) => {
+ let role = req.header.as_ref().map_or(0, |h| h.role);
if pusher_key.is_none() {
if let Some(peer) = &req.peer {
- let key = format!(
- "{}-{}-{}",
- peer.addr,
- peer.id,
- PUSHER_ID.fetch_add(1, Ordering::Relaxed)
- );
- handler_group.register(&key, tx.clone()).await;
+ let key = format!("{}-{}", role, peer.id,);
+ let pusher = Pusher::new(tx.clone(), &req.header);
+ handler_group.register(&key, pusher).await;
pusher_key = Some(key);
}
}
diff --git a/src/meta-srv/src/service/mailbox.rs b/src/meta-srv/src/service/mailbox.rs
new file mode 100644
index 000000000000..32535e59470a
--- /dev/null
+++ b/src/meta-srv/src/service/mailbox.rs
@@ -0,0 +1,76 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{Context, Poll};
+use std::time::Duration;
+
+use api::v1::meta::MailboxMessage;
+use futures::Future;
+use tokio::sync::oneshot;
+
+use crate::error::{self, Result};
+
+pub type MailboxRef = Arc<dyn Mailbox>;
+
+pub type MessageId = u64;
+
+pub enum Channel {
+ Datanode(u64),
+ Frontend(u64),
+}
+
+pub struct MailboxReceiver {
+ message_id: MessageId,
+ rx: oneshot::Receiver<Result<MailboxMessage>>,
+}
+
+impl MailboxReceiver {
+ pub fn new(message_id: MessageId, rx: oneshot::Receiver<Result<MailboxMessage>>) -> Self {
+ Self { message_id, rx }
+ }
+
+ pub fn message_id(&self) -> MessageId {
+ self.message_id
+ }
+}
+
+impl Future for MailboxReceiver {
+ type Output = Result<Result<MailboxMessage>>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ Pin::new(&mut self.rx).poll(cx).map(|r| {
+ r.map_err(|e| {
+ error::MailboxReceiverSnafu {
+ id: self.message_id,
+ err_msg: e.to_string(),
+ }
+ .build()
+ })
+ })
+ }
+}
+
+#[async_trait::async_trait]
+pub trait Mailbox: Send + Sync {
+ async fn send(
+ &self,
+ ch: &Channel,
+ msg: MailboxMessage,
+ timeout: Duration,
+ ) -> Result<MailboxReceiver>;
+
+ async fn on_recv(&self, id: MessageId, maybe_msg: Result<MailboxMessage>) -> Result<()>;
+}
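
A condensed, purely illustrative sketch of the round trip this trait defines (the `ping_datanode` helper and the message contents are hypothetical; only the types and methods come from the diff above):

use std::time::Duration;

use api::v1::meta::MailboxMessage;

use crate::error::Result;
use crate::service::mailbox::{Channel, MailboxRef};

// Hypothetical helper inside meta-srv: send a message to a datanode over its
// heartbeat stream and wait up to one second for the reply routed back by `on_recv`.
async fn ping_datanode(mailbox: &MailboxRef, datanode_id: u64) -> Result<MailboxMessage> {
    let msg = MailboxMessage {
        subject: "ping".to_string(),
        ..Default::default()
    };
    // `send` allocates a MessageId from the sequence, pushes the message to the
    // datanode's Pusher, and returns a receiver keyed by that id.
    let receiver = mailbox
        .send(&Channel::Datanode(datanode_id), msg, Duration::from_secs(1))
        .await?;
    // The receiver resolves once `on_recv` is called with the same id, or with a
    // MailboxTimeout error if the deadline passes first.
    receiver.await?
}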
|
feat
|
metasrv mailbox (#1481)
|
08bd40333cb228c51f74529f6edf66b56c508bc9
|
2024-09-19 10:08:41
|
Ning Sun
|
feat: add an option to turn on compression for arrow output (#4730)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index fdad3a09bef5..317665770625 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -443,6 +443,7 @@ dependencies = [
"arrow-schema",
"flatbuffers",
"lz4_flex 0.11.3",
+ "zstd 0.13.1",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 046bf82478f5..c1eea12a53bc 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -90,7 +90,7 @@ aquamarine = "0.3"
arrow = { version = "51.0.0", features = ["prettyprint"] }
arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
arrow-flight = "51.0"
-arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4"] }
+arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4", "zstd"] }
arrow-schema = { version = "51.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index 2313d19bbeb5..956a650fccae 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -1136,7 +1136,7 @@ mod test {
RecordBatches::try_new(schema.clone(), vec![recordbatch.clone()]).unwrap();
let outputs = vec![Ok(Output::new_with_record_batches(recordbatches))];
let json_resp = match format {
- ResponseFormat::Arrow => ArrowResponse::from_output(outputs).await,
+ ResponseFormat::Arrow => ArrowResponse::from_output(outputs, None).await,
ResponseFormat::Csv => CsvResponse::from_output(outputs).await,
ResponseFormat::Table => TableResponse::from_output(outputs).await,
ResponseFormat::GreptimedbV1 => GreptimedbV1Response::from_output(outputs).await,
diff --git a/src/servers/src/http/arrow_result.rs b/src/servers/src/http/arrow_result.rs
index e6d2441ee22d..6a739fee0464 100644
--- a/src/servers/src/http/arrow_result.rs
+++ b/src/servers/src/http/arrow_result.rs
@@ -16,7 +16,8 @@ use std::pin::Pin;
use std::sync::Arc;
use arrow::datatypes::Schema;
-use arrow_ipc::writer::FileWriter;
+use arrow_ipc::writer::{FileWriter, IpcWriteOptions};
+use arrow_ipc::CompressionType;
use axum::http::{header, HeaderValue};
use axum::response::{IntoResponse, Response};
use common_error::status_code::StatusCode;
@@ -41,10 +42,15 @@ pub struct ArrowResponse {
async fn write_arrow_bytes(
mut recordbatches: Pin<Box<dyn RecordBatchStream + Send>>,
schema: &Arc<Schema>,
+ compression: Option<CompressionType>,
) -> Result<Vec<u8>, Error> {
let mut bytes = Vec::new();
{
- let mut writer = FileWriter::try_new(&mut bytes, schema).context(error::ArrowSnafu)?;
+ let options = IpcWriteOptions::default()
+ .try_with_compression(compression)
+ .context(error::ArrowSnafu)?;
+ let mut writer = FileWriter::try_new_with_options(&mut bytes, schema, options)
+ .context(error::ArrowSnafu)?;
while let Some(rb) = recordbatches.next().await {
let rb = rb.context(error::CollectRecordbatchSnafu)?;
@@ -59,8 +65,22 @@ async fn write_arrow_bytes(
Ok(bytes)
}
+fn compression_type(compression: Option<String>) -> Option<CompressionType> {
+ match compression
+ .map(|compression| compression.to_lowercase())
+ .as_deref()
+ {
+ Some("zstd") => Some(CompressionType::ZSTD),
+ Some("lz4") => Some(CompressionType::LZ4_FRAME),
+ _ => None,
+ }
+}
+
impl ArrowResponse {
- pub async fn from_output(mut outputs: Vec<error::Result<Output>>) -> HttpResponse {
+ pub async fn from_output(
+ mut outputs: Vec<error::Result<Output>>,
+ compression: Option<String>,
+ ) -> HttpResponse {
if outputs.len() > 1 {
return HttpResponse::Error(ErrorResponse::from_error_message(
StatusCode::InvalidArguments,
@@ -68,6 +88,8 @@ impl ArrowResponse {
));
}
+ let compression = compression_type(compression);
+
match outputs.pop() {
None => HttpResponse::Arrow(ArrowResponse {
data: vec![],
@@ -80,7 +102,9 @@ impl ArrowResponse {
}),
OutputData::RecordBatches(batches) => {
let schema = batches.schema();
- match write_arrow_bytes(batches.as_stream(), schema.arrow_schema()).await {
+ match write_arrow_bytes(batches.as_stream(), schema.arrow_schema(), compression)
+ .await
+ {
Ok(payload) => HttpResponse::Arrow(ArrowResponse {
data: payload,
execution_time_ms: 0,
@@ -90,7 +114,7 @@ impl ArrowResponse {
}
OutputData::Stream(batches) => {
let schema = batches.schema();
- match write_arrow_bytes(batches, schema.arrow_schema()).await {
+ match write_arrow_bytes(batches, schema.arrow_schema(), compression).await {
Ok(payload) => HttpResponse::Arrow(ArrowResponse {
data: payload,
execution_time_ms: 0,
@@ -136,3 +160,64 @@ impl IntoResponse for ArrowResponse {
.into_response()
}
}
+
+#[cfg(test)]
+mod test {
+ use std::io::Cursor;
+
+ use arrow_ipc::reader::FileReader;
+ use arrow_schema::DataType;
+ use common_recordbatch::{RecordBatch, RecordBatches};
+ use datatypes::prelude::*;
+ use datatypes::schema::{ColumnSchema, Schema};
+ use datatypes::vectors::{StringVector, UInt32Vector};
+
+ use super::*;
+
+ #[tokio::test]
+ async fn test_arrow_output() {
+ let column_schemas = vec![
+ ColumnSchema::new("numbers", ConcreteDataType::uint32_datatype(), false),
+ ColumnSchema::new("strings", ConcreteDataType::string_datatype(), true),
+ ];
+ let schema = Arc::new(Schema::new(column_schemas));
+ let columns: Vec<VectorRef> = vec![
+ Arc::new(UInt32Vector::from_slice(vec![1, 2, 3, 4])),
+ Arc::new(StringVector::from(vec![
+ None,
+ Some("hello"),
+ Some("greptime"),
+ None,
+ ])),
+ ];
+
+ for compression in [None, Some("zstd".to_string()), Some("lz4".to_string())].into_iter() {
+ let recordbatch = RecordBatch::new(schema.clone(), columns.clone()).unwrap();
+ let recordbatches =
+ RecordBatches::try_new(schema.clone(), vec![recordbatch.clone()]).unwrap();
+ let outputs = vec![Ok(Output::new_with_record_batches(recordbatches))];
+
+ let http_resp = ArrowResponse::from_output(outputs, compression).await;
+ match http_resp {
+ HttpResponse::Arrow(resp) => {
+ let output = resp.data;
+ let mut reader =
+ FileReader::try_new(Cursor::new(output), None).expect("Arrow reader error");
+ let schema = reader.schema();
+ assert_eq!(schema.fields[0].name(), "numbers");
+ assert_eq!(schema.fields[0].data_type(), &DataType::UInt32);
+ assert_eq!(schema.fields[1].name(), "strings");
+ assert_eq!(schema.fields[1].data_type(), &DataType::Utf8);
+
+ let rb = reader.next().unwrap().expect("read record batch failed");
+ assert_eq!(rb.num_columns(), 2);
+ assert_eq!(rb.num_rows(), 4);
+ }
+ HttpResponse::Error(e) => {
+ panic!("unexpected {:?}", e);
+ }
+ _ => unreachable!(),
+ }
+ }
+ }
+}
diff --git a/src/servers/src/http/handler.rs b/src/servers/src/http/handler.rs
index 4d5ca5846159..1befc2224014 100644
--- a/src/servers/src/http/handler.rs
+++ b/src/servers/src/http/handler.rs
@@ -51,7 +51,8 @@ use crate::query_handler::sql::ServerSqlQueryHandlerRef;
pub struct SqlQuery {
pub db: Option<String>,
pub sql: Option<String>,
- // (Optional) result format: [`greptimedb_v1`, `influxdb_v1`, `csv`],
+ // (Optional) result format: [`greptimedb_v1`, `influxdb_v1`, `csv`,
+ // `arrow`],
// the default value is `greptimedb_v1`
pub format: Option<String>,
// Returns epoch timestamps with the specified precision.
@@ -64,6 +65,8 @@ pub struct SqlQuery {
// param too.
pub epoch: Option<String>,
pub limit: Option<usize>,
+ // Optional compression for arrow output: `zstd` or `lz4`
+ pub compression: Option<String>,
}
/// Handler to execute sql
@@ -128,7 +131,9 @@ pub async fn sql(
};
let mut resp = match format {
- ResponseFormat::Arrow => ArrowResponse::from_output(outputs).await,
+ ResponseFormat::Arrow => {
+ ArrowResponse::from_output(outputs, query_params.compression).await
+ }
ResponseFormat::Csv => CsvResponse::from_output(outputs).await,
ResponseFormat::Table => TableResponse::from_output(outputs).await,
ResponseFormat::GreptimedbV1 => GreptimedbV1Response::from_output(outputs).await,
|
feat
|
add an option to turn on compression for arrow output (#4730)
|
87c21e2baa27ee6913394d39e5ab32031453d3c3
|
2025-01-19 18:25:25
|
discord9
|
fix(flow): deal with flow drop leftover (#5391)
| false
|
diff --git a/src/flow/src/adapter.rs b/src/flow/src/adapter.rs
index 373cc7e8917b..777dcbcdf88e 100644
--- a/src/flow/src/adapter.rs
+++ b/src/flow/src/adapter.rs
@@ -284,12 +284,29 @@ impl FlowWorkerManager {
let (catalog, schema) = (table_name[0].clone(), table_name[1].clone());
let ctx = Arc::new(QueryContext::with(&catalog, &schema));
- let (is_ts_placeholder, proto_schema) = self
+ let (is_ts_placeholder, proto_schema) = match self
.try_fetch_existing_table(&table_name)
.await?
.context(UnexpectedSnafu {
reason: format!("Table not found: {}", table_name.join(".")),
- })?;
+ }) {
+ Ok(r) => r,
+ Err(e) => {
+ if self
+ .table_info_source
+ .get_opt_table_id_from_name(&table_name)
+ .await?
+ .is_none()
+ {
+ // deal with both flow&sink table no longer exists
+ // but some output is still in output buf
+ common_telemetry::warn!(e; "Table `{}` no longer exists, skip writeback", table_name.join("."));
+ continue;
+ } else {
+ return Err(e);
+ }
+ }
+ };
let schema_len = proto_schema.len();
let total_rows = reqs.iter().map(|r| r.len()).sum::<usize>();
|
fix
|
deal with flow drop leftover (#5391)
|
d5648c18c1692283f1c1d4188081f62737555551
|
2024-01-26 12:43:28
|
discord9
|
docs: RFC of Dataflow Framework (#3185)
| false
|
diff --git a/docs/rfcs/2024-01-17-dataflow-framework.md b/docs/rfcs/2024-01-17-dataflow-framework.md
new file mode 100644
index 000000000000..3d62deba42d5
--- /dev/null
+++ b/docs/rfcs/2024-01-17-dataflow-framework.md
@@ -0,0 +1,97 @@
+---
+Feature Name: Dataflow Framework
+Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/3187
+Date: 2024-01-17
+Author: "Discord9 <[email protected]>"
+---
+
+# Summary
+This RFC proposes a lightweight module for executing continuous aggregation queries on a stream of data.
+
+# Motivation
+Being able to do continuous aggregation is a very powerful tool. It allows you to do things like:
+1. downsample data from, e.g., 1 millisecond to 1 second resolution
+2. calculate the average of a stream of data
+3. keep a sliding window of data in memory
+To do those things while maintaining a low memory footprint, the data must be managed in a smart way. Hence, we only store the necessary data in memory, and send/receive data deltas to/from the client.
+
+# Details
+
+## System boundary / What it is and isn't
+- Greptime Flow provides a way to perform continuous aggregation over time-series data.
+- It's not a complete stream-processing system; only a necessary subset of functionalities is provided.
+- Flow can process a configured range of fresh data. Data exceeding this range is dropped directly, so Flow cannot handle datasets with random timestamps.
+- Both sliding windows (e.g., the latest 5m from the present) and fixed windows (every 5m from some point in time) are supported, and these two are the major target scenarios.
+- Flow can handle most aggregate operators within one table (e.g. sum, avg, min, max and comparison operators), but others (join, trigger, txn, etc.) are not target features.
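+
+As a rough illustration of this boundary, the sketch below shows the kind of single-table, windowed aggregation Flow targets (the table and column names are invented for illustration, and the `now() - 5m` / `time(1m)` notation follows the example syntax used later in this RFC):
+
+```sql
+-- Supported shape: simple aggregates over one table within a fresh time window.
+SELECT host, avg(cpu) AS avg_cpu, max(cpu) AS max_cpu
+FROM monitor
+WHERE time > now() - 5m   -- only fresh data inside the configured window
+GROUP BY host, time(1m);  -- aggregate into fixed 1-minute buckets
+
+-- Out of scope at this stage: joins, triggers, transactions, e.g.
+-- SELECT ... FROM monitor JOIN hosts ON ... ;
+```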
+
+## Framework
+- Greptime Flow is built on top of [Hydroflow](https://github.com/hydro-project/hydroflow).
+- We considered three choices of dataflow/stream-processing framework for our simple continuous aggregation feature:
+1. Build on the timely/differential dataflow crates that [materialize](https://github.com/MaterializeInc/materialize) is based on. They later proved too obscure for simple usage, and memory usage control is hard to customize.
+2. Build a simple dataflow framework from the ground up, as [arroyo](https://www.arroyo.dev/) or [risingwave](https://www.risingwave.dev/) did; for example, the core streaming logic of [arroyo](https://github.com/ArroyoSystems/arroyo/blob/master/arroyo-datastream/src/lib.rs) takes only about 2000 lines of code. However, this means maintaining another dataflow framework layer, which might seem easy at the beginning but could become too burdensome to maintain once we need more features.
+3. Build on a simple, lower-level dataflow framework written by someone else, like [hydroflow](https://github.com/hydro-project/hydroflow). This approach combines the best of both worlds: it is easy to comprehend and customize, and the framework offers precisely the features needed for crafting uncomplicated single-node dataflow programs while delivering decent performance.
+
+Hence, we choose the third option and use a simple logical plan that is agnostic to the underlying dataflow framework: it only describes what the dataflow graph should do, not how it does it. We build operators in Hydroflow to execute the plan, and the resulting Hydroflow graph is wrapped in an engine that only supports data in/out and a tick event to flush and compute the result. This provides a thin middle layer that is easy to maintain and allows switching to another dataflow framework if necessary.
+
+## Deploy mode and protocol
+- Greptime Flow is an independent streaming compute component. It can be used either within a standalone node or as a dedicated node at the same level as the frontend in distributed mode.
+- It accepts insert requests as Rows, the same format used between frontend and datanode.
+- A new flow job is submitted as a modified SQL query, as Snowflake does, e.g. `CREATE TASK avg_over_5m WINDOW_SIZE = "5m" AS SELECT avg(value) FROM table WHERE time > now() - 5m GROUP BY time(1m)` (a formatted sketch follows this list). The flow job is then stored in Metasrv.
+- It also persists results to the frontend in the Rows format.
+- The query plan uses Substrait as its codec format, the same as GreptimeDB's query engine.
+- Greptime Flow needs a WAL for recovery; it is possible to reuse the datanode's.
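+
+To make that syntax concrete, here is the same illustrative statement formatted as a block (the `CREATE TASK` keywords, the window option and all names are placeholders from the example above, not a finalized grammar):
+
+```sql
+-- Illustrative flow-job definition: materialize 1-minute averages of `value`,
+-- keeping only the latest 5 minutes of fresh data.
+CREATE TASK avg_over_5m
+  WINDOW_SIZE = "5m"
+AS
+  SELECT avg(value)
+  FROM table
+  WHERE time > now() - 5m
+  GROUP BY time(1m);
+```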
+
+The workflow is shown in the following diagram:
+```mermaid
+graph TB
+subgraph Flownode["Flownode"]
+ subgraph Dataflows
+ df1("Dataflow_1")
+ df2("Dataflow_2")
+ end
+end
+subgraph Frontend["Frontend"]
+ newLines["Mirror Insert
+Create Task From Query
+Write result from flow node"]
+end
+
+subgraph Datanode["Datanode"]
+end
+
+User --> Frontend
+Frontend -->|Register Task| Metasrv
+Metasrv -->|Read Task Metadata| Frontend
+Frontend -->|Create Task| Flownode
+
+Frontend -->|Mirror Insert| Flownode
+Flownode -->|Write back| Frontend
+
+Frontend --> Datanode
+Datanode --> Frontend
+
+```
+
+## Lifecycle of data
+- New data is inserted through the frontend as before. The frontend mirrors insert requests to the flow node if a flow job is configured.
+- Depending on the timestamp of the incoming data, Flow either drops it (outdated data) or processes it (fresh data).
+- Greptime Flow periodically writes results back to the result table through the frontend.
+- Those results are then stored in a result table on the datanode.
+- A small table of intermediate state, used to calculate the result, is kept in memory.
+## Supported operations
+- Greptime Flow accepts a configurable "materialize window"; data points exceeding that time window are discarded.
+- Data within the "materialize window" is queryable and updatable.
+- Greptime Flow can handle partitioning if and only if the input query can be transformed into a fully partitioned plan according to the existing commutative rules. Otherwise the corresponding flow job has to be calculated on a single node.
+- Note that Greptime Flow has to see all the data belonging to one partition.
+- Deletion and duplicate insertion are not supported in the early stage.
+## Miscellaneous
+- Greptime Flow can translate SQL into its own plan; however, only a select few aggregate functions are supported for now, such as min/max/sum/count/avg.
+- Greptime Flow's operators are configurable in terms of the materialize window size, whether to tolerate delayed incoming data, etc., so the simplest operators can choose not to tolerate any delay in order to save memory.
+
+# Future Work
+- Support UDFs that do one-to-one mapping. Preferably, we can reuse the UDF mechanism in GreptimeDB.
+- Support the join operator.
+- Design syntax for configuring operators with different materialize windows and delay tolerances.
+- Support a cross-partition merge operator, so that complex query plans that do not necessarily accord with the partitioning rules can communicate between nodes and create the final materialized result.
+- Support duplicate insertion, which can be reverted easily within the current framework, so supporting it could be easy.
+- Support deletion within the "materialize window"; this requires operators like min/max to store all inputs within the materialize window, which might require further optimization.
|
docs
|
RFC of Dataflow Framework (#3185)
|
448e8f139e23b89514b64a43e459efb023a46c19
|
2022-11-14 20:46:52
|
dennis zhuang
|
fix: table and database conflicts (#491)
| false
|
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index 88dd007b4db7..800ec601afdf 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -15,7 +15,7 @@ env:
jobs:
coverage:
if: github.event.pull_request.draft == false
- runs-on: ubuntu-latest
+ runs-on: ubuntu-latest-16-cores
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
diff --git a/src/catalog/tests/mock.rs b/src/catalog/tests/mock.rs
index f41e1da533ea..cc9f67c7d488 100644
--- a/src/catalog/tests/mock.rs
+++ b/src/catalog/tests/mock.rs
@@ -13,7 +13,7 @@ use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::StringVector;
use serde::Serializer;
-use table::engine::{EngineContext, TableEngine};
+use table::engine::{EngineContext, TableEngine, TableReference};
use table::metadata::TableId;
use table::requests::{AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest};
use table::test_util::MemTable;
@@ -175,12 +175,28 @@ impl TableEngine for MockTableEngine {
unimplemented!()
}
- fn get_table(&self, _ctx: &EngineContext, name: &str) -> table::Result<Option<TableRef>> {
- futures::executor::block_on(async { Ok(self.tables.read().await.get(name).cloned()) })
+ fn get_table<'a>(
+ &self,
+ _ctx: &EngineContext,
+ table_ref: &'a TableReference,
+ ) -> table::Result<Option<TableRef>> {
+ futures::executor::block_on(async {
+ Ok(self
+ .tables
+ .read()
+ .await
+ .get(&table_ref.to_string())
+ .cloned())
+ })
}
- fn table_exists(&self, _ctx: &EngineContext, name: &str) -> bool {
- futures::executor::block_on(async { self.tables.read().await.contains_key(name) })
+ fn table_exists<'a>(&self, _ctx: &EngineContext, table_ref: &'a TableReference) -> bool {
+ futures::executor::block_on(async {
+ self.tables
+ .read()
+ .await
+ .contains_key(&table_ref.to_string())
+ })
}
async fn drop_table(
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 1b38f9829008..95247dd30501 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -9,6 +9,7 @@ use frontend::frontend::Mode;
use log_store::fs::{config::LogConfig, log::LocalFileLogStore};
use meta_client::client::{MetaClient, MetaClientBuilder};
use meta_client::MetaClientOpts;
+use object_store::layers::LoggingLayer;
use object_store::{services::fs::Builder, util, ObjectStore};
use query::query_engine::{QueryEngineFactory, QueryEngineRef};
use snafu::prelude::*;
@@ -156,7 +157,9 @@ pub(crate) async fn new_object_store(store_config: &ObjectStoreConfig) -> Result
.build()
.context(error::InitBackendSnafu { dir: &data_dir })?;
- Ok(ObjectStore::new(accessor))
+ let object_store = ObjectStore::new(accessor).layer(LoggingLayer); // Add logging
+
+ Ok(object_store)
}
/// Create metasrv client instance and spawn heartbeat loop.
diff --git a/src/datanode/src/instance/grpc.rs b/src/datanode/src/instance/grpc.rs
index 1dee0a0e27c3..5e0be36a25da 100644
--- a/src/datanode/src/instance/grpc.rs
+++ b/src/datanode/src/instance/grpc.rs
@@ -4,7 +4,7 @@ use api::v1::{
ObjectExpr, ObjectResult, SelectExpr,
};
use async_trait::async_trait;
-use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_insert::insertion_expr_to_request;
@@ -152,9 +152,8 @@ impl GrpcQueryHandler for Instance {
async fn do_query(&self, query: ObjectExpr) -> servers::error::Result<ObjectResult> {
let object_resp = match query.expr {
Some(object_expr::Expr::Insert(insert_expr)) => {
- // TODO(dennis): retrieve schema name from DatabaseRequest
let catalog_name = DEFAULT_CATALOG_NAME;
- let schema_name = DEFAULT_SCHEMA_NAME;
+ let schema_name = &insert_expr.schema_name;
let table_name = &insert_expr.table_name;
let expr = insert_expr
.expr
diff --git a/src/datanode/src/sql.rs b/src/datanode/src/sql.rs
index b3cf4794dbf2..9ff6abf76e83 100644
--- a/src/datanode/src/sql.rs
+++ b/src/datanode/src/sql.rs
@@ -5,7 +5,7 @@ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_query::Output;
use snafu::{OptionExt, ResultExt};
use sql::statements::show::{ShowDatabases, ShowTables};
-use table::engine::{EngineContext, TableEngineRef};
+use table::engine::{EngineContext, TableEngineRef, TableReference};
use table::requests::*;
use table::TableRef;
@@ -54,11 +54,15 @@ impl SqlHandler {
}
}
- pub(crate) fn get_table(&self, table_name: &str) -> Result<TableRef> {
+ pub(crate) fn get_table<'a>(&self, table_ref: &'a TableReference) -> Result<TableRef> {
self.table_engine
- .get_table(&EngineContext::default(), table_name)
- .context(GetTableSnafu { table_name })?
- .context(TableNotFoundSnafu { table_name })
+ .get_table(&EngineContext::default(), table_ref)
+ .with_context(|_| GetTableSnafu {
+ table_name: table_ref.to_string(),
+ })?
+ .with_context(|| TableNotFoundSnafu {
+ table_name: table_ref.to_string(),
+ })
}
pub(crate) fn get_default_catalog(&self) -> Result<CatalogProviderRef> {
diff --git a/src/datanode/src/sql/alter.rs b/src/datanode/src/sql/alter.rs
index 63d98a40dc80..87ca827850f0 100644
--- a/src/datanode/src/sql/alter.rs
+++ b/src/datanode/src/sql/alter.rs
@@ -1,8 +1,9 @@
+use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_query::Output;
use snafu::prelude::*;
use sql::statements::alter::{AlterTable, AlterTableOperation};
use sql::statements::{column_def_to_schema, table_idents_to_full_name};
-use table::engine::EngineContext;
+use table::engine::{EngineContext, TableReference};
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest};
use crate::error::{self, Result};
@@ -11,15 +12,29 @@ use crate::sql::SqlHandler;
impl SqlHandler {
pub(crate) async fn alter(&self, req: AlterTableRequest) -> Result<Output> {
let ctx = EngineContext {};
- let table_name = &req.table_name.clone();
+ let catalog_name = req.catalog_name.as_deref().unwrap_or(DEFAULT_CATALOG_NAME);
+ let schema_name = req.schema_name.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME);
+ let table_name = &req.table_name.to_string();
+ let table_ref = TableReference {
+ catalog: catalog_name,
+ schema: schema_name,
+ table: table_name,
+ };
+
+ let full_table_name = table_ref.to_string();
+
ensure!(
- self.table_engine.table_exists(&ctx, table_name),
- error::TableNotFoundSnafu { table_name }
+ self.table_engine.table_exists(&ctx, &table_ref),
+ error::TableNotFoundSnafu {
+ table_name: &full_table_name,
+ }
);
self.table_engine
.alter_table(&ctx, req)
.await
- .context(error::AlterTableSnafu { table_name })?;
+ .context(error::AlterTableSnafu {
+ table_name: full_table_name,
+ })?;
// Tried in MySQL, it really prints "Affected Rows: 0".
Ok(Output::AffectedRows(0))
}
diff --git a/src/datanode/src/sql/insert.rs b/src/datanode/src/sql/insert.rs
index 556842c514f5..fd1a7e6fe8fc 100644
--- a/src/datanode/src/sql/insert.rs
+++ b/src/datanode/src/sql/insert.rs
@@ -7,6 +7,7 @@ use snafu::OptionExt;
use snafu::ResultExt;
use sql::ast::Value as SqlValue;
use sql::statements::{self, insert::Insert};
+use table::engine::TableReference;
use table::requests::*;
use crate::error::{
@@ -17,13 +18,19 @@ use crate::sql::{SqlHandler, SqlRequest};
impl SqlHandler {
pub(crate) async fn insert(&self, req: InsertRequest) -> Result<Output> {
- let table_name = &req.table_name.to_string();
- let table = self.get_table(table_name)?;
+ // FIXME(dennis): table_ref is used in InsertSnafu and the req is consumed
+ // in `insert`, so we have to clone catalog_name etc.
+ let table_ref = TableReference {
+ catalog: &req.catalog_name.to_string(),
+ schema: &req.schema_name.to_string(),
+ table: &req.table_name.to_string(),
+ };
+
+ let table = self.get_table(&table_ref)?;
- let affected_rows = table
- .insert(req)
- .await
- .context(InsertSnafu { table_name })?;
+ let affected_rows = table.insert(req).await.with_context(|_| InsertSnafu {
+ table_name: table_ref.to_string(),
+ })?;
Ok(Output::AffectedRows(affected_rows))
}
diff --git a/src/datanode/src/tests/grpc_test.rs b/src/datanode/src/tests/grpc_test.rs
index ed0437fa7ee1..d1235403536e 100644
--- a/src/datanode/src/tests/grpc_test.rs
+++ b/src/datanode/src/tests/grpc_test.rs
@@ -24,12 +24,13 @@ use crate::tests::test_util::{self, TestGuard};
async fn setup_grpc_server(
name: &str,
- port: usize,
+ datanode_port: usize,
+ frontend_port: usize,
) -> (String, TestGuard, Arc<GrpcServer>, Arc<GrpcServer>) {
common_telemetry::init_default_ut_logging();
let (mut opts, guard) = test_util::create_tmp_dir_and_datanode_opts(name);
- let datanode_grpc_addr = format!("127.0.0.1:{}", port);
+ let datanode_grpc_addr = format!("127.0.0.1:{}", datanode_port);
opts.rpc_addr = datanode_grpc_addr.clone();
let instance = Arc::new(Instance::with_mock_meta_client(&opts).await.unwrap());
instance.start().await.unwrap();
@@ -43,7 +44,7 @@ async fn setup_grpc_server(
.unwrap(),
);
- let fe_grpc_addr = format!("127.0.0.1:{}", port + 1);
+ let fe_grpc_addr = format!("127.0.0.1:{}", frontend_port);
let fe_opts = FrontendOptions {
mode: Standalone,
datanode_rpc_addr: datanode_grpc_addr.clone(),
@@ -95,7 +96,7 @@ async fn setup_grpc_server(
#[tokio::test(flavor = "multi_thread")]
async fn test_auto_create_table() {
let (addr, _guard, fe_grpc_server, dn_grpc_server) =
- setup_grpc_server("auto_create_table", 3991).await;
+ setup_grpc_server("auto_create_table", 3992, 3993).await;
let grpc_client = Client::with_urls(vec![addr]);
let db = Database::new("greptime", grpc_client);
@@ -162,7 +163,7 @@ fn expect_data() -> (Column, Column, Column, Column) {
async fn test_insert_and_select() {
common_telemetry::init_default_ut_logging();
let (addr, _guard, fe_grpc_server, dn_grpc_server) =
- setup_grpc_server("insert_and_select", 3990).await;
+ setup_grpc_server("insert_and_select", 3990, 3991).await;
let grpc_client = Client::with_urls(vec![addr]);
diff --git a/src/datanode/src/tests/instance_test.rs b/src/datanode/src/tests/instance_test.rs
index 7e5c761082db..d75bff672b33 100644
--- a/src/datanode/src/tests/instance_test.rs
+++ b/src/datanode/src/tests/instance_test.rs
@@ -1,4 +1,4 @@
-use arrow::array::{Int64Array, UInt64Array};
+use arrow::array::{Int64Array, UInt64Array, Utf8Array};
use common_query::Output;
use common_recordbatch::util;
use datafusion::arrow_print;
@@ -64,6 +64,106 @@ async fn test_create_database_and_insert_query() {
_ => unreachable!(),
}
}
+#[tokio::test(flavor = "multi_thread")]
+async fn test_issue477_same_table_name_in_different_databases() {
+ common_telemetry::init_default_ut_logging();
+
+ let (opts, _guard) =
+ test_util::create_tmp_dir_and_datanode_opts("create_database_and_insert_query");
+ let instance = Instance::with_mock_meta_client(&opts).await.unwrap();
+ instance.start().await.unwrap();
+
+ // Create database a and b
+ let output = instance.execute_sql("create database a").await.unwrap();
+ assert!(matches!(output, Output::AffectedRows(1)));
+ let output = instance.execute_sql("create database b").await.unwrap();
+ assert!(matches!(output, Output::AffectedRows(1)));
+
+ // Create table a.demo and b.demo
+ let output = instance
+ .execute_sql(
+ r#"create table a.demo(
+ host STRING,
+ ts bigint,
+ TIME INDEX(ts)
+)"#,
+ )
+ .await
+ .unwrap();
+ assert!(matches!(output, Output::AffectedRows(1)));
+
+ let output = instance
+ .execute_sql(
+ r#"create table b.demo(
+ host STRING,
+ ts bigint,
+ TIME INDEX(ts)
+)"#,
+ )
+ .await
+ .unwrap();
+ assert!(matches!(output, Output::AffectedRows(1)));
+
+ // Insert different data into a.demo and b.demo
+ let output = instance
+ .execute_sql(
+ r#"insert into a.demo(host, ts) values
+ ('host1', 1655276557000)
+ "#,
+ )
+ .await
+ .unwrap();
+ assert!(matches!(output, Output::AffectedRows(1)));
+ let output = instance
+ .execute_sql(
+ r#"insert into b.demo(host, ts) values
+ ('host2',1655276558000)
+ "#,
+ )
+ .await
+ .unwrap();
+ assert!(matches!(output, Output::AffectedRows(1)));
+
+ // Query data and assert
+ assert_query_result(
+ &instance,
+ "select host,ts from a.demo order by ts",
+ 1655276557000,
+ "host1",
+ )
+ .await;
+
+ assert_query_result(
+ &instance,
+ "select host,ts from b.demo order by ts",
+ 1655276558000,
+ "host2",
+ )
+ .await;
+}
+
+async fn assert_query_result(instance: &Instance, sql: &str, ts: i64, host: &str) {
+ let query_output = instance.execute_sql(sql).await.unwrap();
+ match query_output {
+ Output::Stream(s) => {
+ let batches = util::collect(s).await.unwrap();
+ let columns = batches[0].df_recordbatch.columns();
+ assert_eq!(2, columns.len());
+ assert_eq!(
+ &Utf8Array::<i32>::from_slice(&[host]),
+ columns[0]
+ .as_any()
+ .downcast_ref::<Utf8Array<i32>>()
+ .unwrap()
+ );
+ assert_eq!(
+ &Int64Array::from_slice(&[ts]),
+ columns[1].as_any().downcast_ref::<Int64Array>().unwrap()
+ );
+ }
+ _ => unreachable!(),
+ }
+}
#[tokio::test(flavor = "multi_thread")]
async fn test_execute_insert() {
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 6310faa9068d..2f0aae543819 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -12,14 +12,15 @@ use api::v1::alter_expr::Kind;
use api::v1::codec::InsertBatch;
use api::v1::object_expr::Expr;
use api::v1::{
- insert_expr, AddColumns, AdminExpr, AdminResult, AlterExpr, CreateDatabaseExpr, CreateExpr,
- InsertExpr, ObjectExpr, ObjectResult as GrpcObjectResult,
+ admin_expr, insert_expr, AddColumns, AdminExpr, AdminResult, AlterExpr, CreateDatabaseExpr,
+ CreateExpr, InsertExpr, ObjectExpr, ObjectResult as GrpcObjectResult,
};
use async_trait::async_trait;
use catalog::remote::MetaKvBackend;
use catalog::{CatalogManagerRef, CatalogProviderRef, SchemaProviderRef};
use client::admin::{admin_result_to_output, Admin};
use client::{Client, Database, Select};
+use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::{BoxedError, StatusCode};
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_query::Output;
@@ -155,14 +156,12 @@ impl Instance {
Ok(instance)
}
- // TODO(fys): temporarily hard code
- pub fn database(&self) -> Database {
- Database::new("greptime", self.client.clone())
+ pub fn database(&self, database: &str) -> Database {
+ Database::new(database, self.client.clone())
}
- // TODO(fys): temporarily hard code
- pub fn admin(&self) -> Admin {
- Admin::new("greptime", self.client.clone())
+ pub fn admin(&self, database: &str) -> Admin {
+ Admin::new(database, self.client.clone())
}
pub fn set_catalog_manager(&mut self, catalog_manager: CatalogManagerRef) {
@@ -173,7 +172,9 @@ impl Instance {
if let Some(dist_instance) = &self.dist_instance {
dist_instance.handle_select(expr).await
} else {
- self.database()
+ // TODO(LFC): Find a better way to execute query between Frontend and Datanode in standalone mode.
+ // Otherwise we have to parse SQL first to get schema name. Maybe not GRPC.
+ self.database(DEFAULT_SCHEMA_NAME)
.select(expr)
.await
.and_then(Output::try_from)
@@ -191,7 +192,10 @@ impl Instance {
v.create_table(&mut expr, partitions).await
} else {
// Currently standalone mode does not support multi partitions/regions.
- let result = self.admin().create(expr.clone()).await;
+ let result = self
+ .admin(expr.schema_name.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME))
+ .create(expr.clone())
+ .await;
if let Err(e) = &result {
error!(e; "Failed to create table by expr: {:?}", expr);
}
@@ -203,7 +207,7 @@ impl Instance {
/// Handle create database expr.
pub async fn handle_create_database(&self, expr: CreateDatabaseExpr) -> Result<Output> {
- self.admin()
+ self.admin(DEFAULT_SCHEMA_NAME)
.create_database(expr)
.await
.and_then(admin_result_to_output)
@@ -212,7 +216,7 @@ impl Instance {
/// Handle alter expr
pub async fn handle_alter(&self, expr: AlterExpr) -> Result<Output> {
- self.admin()
+ self.admin(expr.schema_name.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME))
.alter(expr)
.await
.and_then(admin_result_to_output)
@@ -234,8 +238,8 @@ impl Instance {
/// Handle insert. for 'values' insertion, create/alter the destination table on demand.
pub async fn handle_insert(&self, insert_expr: &InsertExpr) -> Result<Output> {
let table_name = &insert_expr.table_name;
- let catalog_name = "greptime";
- let schema_name = "public";
+ let catalog_name = DEFAULT_CATALOG_NAME;
+ let schema_name = &insert_expr.schema_name;
if let Some(expr) = &insert_expr.expr {
match expr {
@@ -253,7 +257,7 @@ impl Instance {
}
api::v1::insert_expr::Expr::Sql(_) => {
// Frontend does not comprehend insert request that is raw SQL string
- self.database()
+ self.database(schema_name)
.insert(insert_expr.clone())
.await
.and_then(Output::try_from)
@@ -286,7 +290,7 @@ impl Instance {
&insert_batches,
)
.await?;
- self.database()
+ self.database(schema_name)
.insert(InsertExpr {
schema_name: schema_name.to_string(),
table_name: table_name.to_string(),
@@ -350,8 +354,13 @@ impl Instance {
"Find new columns {:?} on insertion, try to alter table: {}.{}.{}",
add_columns, catalog_name, schema_name, table_name
);
- self.add_new_columns_to_table(table_name, add_columns)
- .await?;
+ self.add_new_columns_to_table(
+ catalog_name,
+ schema_name,
+ table_name,
+ add_columns,
+ )
+ .await?;
info!(
"Successfully altered table on insertion: {}.{}.{}",
catalog_name, schema_name, table_name
@@ -386,6 +395,8 @@ impl Instance {
async fn add_new_columns_to_table(
&self,
+ catalog_name: &str,
+ schema_name: &str,
table_name: &str,
add_columns: AddColumns,
) -> Result<Output> {
@@ -395,11 +406,11 @@ impl Instance {
);
let expr = AlterExpr {
table_name: table_name.to_string(),
- schema_name: None,
- catalog_name: None,
+ schema_name: Some(schema_name.to_string()),
+ catalog_name: Some(catalog_name.to_string()),
kind: Some(Kind::AddColumns(add_columns)),
};
- self.admin()
+ self.admin(schema_name)
.alter(expr)
.await
.and_then(admin_result_to_output)
@@ -608,8 +619,10 @@ impl GrpcQueryHandler for Instance {
query: format!("{:?}", query),
})
}
+
+ // FIXME(hl): refactor
_ => self
- .database()
+ .database(DEFAULT_SCHEMA_NAME)
.object(query.clone())
.await
.map_err(BoxedError::new)
@@ -618,22 +631,27 @@ impl GrpcQueryHandler for Instance {
}),
}
} else {
- // why?
- self.database()
- .object(query.clone())
- .await
- .map_err(BoxedError::new)
- .with_context(|_| server_error::ExecuteQuerySnafu {
- query: format!("{:?}", query),
- })
+ server_error::InvalidQuerySnafu {
+ reason: "empty query",
+ }
+ .fail()
}
}
}
+fn get_schema_name(expr: &AdminExpr) -> &str {
+ let schema_name = match &expr.expr {
+ Some(admin_expr::Expr::Create(expr)) => expr.schema_name.as_deref(),
+ Some(admin_expr::Expr::Alter(expr)) => expr.schema_name.as_deref(),
+ Some(admin_expr::Expr::CreateDatabase(_)) | None => Some(DEFAULT_SCHEMA_NAME),
+ };
+ schema_name.unwrap_or(DEFAULT_SCHEMA_NAME)
+}
+
#[async_trait]
impl GrpcAdminHandler for Instance {
async fn exec_admin_request(&self, expr: AdminExpr) -> server_error::Result<AdminResult> {
- self.admin()
+ self.admin(get_schema_name(&expr))
.do_request(expr.clone())
.await
.map_err(BoxedError::new)
diff --git a/src/frontend/src/instance/prometheus.rs b/src/frontend/src/instance/prometheus.rs
index e3dc41aa0d77..6c2ec04f0012 100644
--- a/src/frontend/src/instance/prometheus.rs
+++ b/src/frontend/src/instance/prometheus.rs
@@ -68,7 +68,7 @@ async fn handle_remote_queries(
let mut results = Vec::with_capacity(queries.len());
for q in queries {
- let (table_name, sql) = prometheus::query_to_sql(q)?;
+ let (table_name, sql) = prometheus::query_to_sql(db.name(), q)?;
logging::debug!(
"prometheus remote read, table: {}, sql: {}",
@@ -90,10 +90,10 @@ async fn handle_remote_queries(
#[async_trait]
impl PrometheusProtocolHandler for Instance {
- async fn write(&self, request: WriteRequest) -> ServerResult<()> {
+ async fn write(&self, database: &str, request: WriteRequest) -> ServerResult<()> {
match self.mode {
Mode::Standalone => {
- let exprs = prometheus::write_request_to_insert_exprs(request)?;
+ let exprs = prometheus::write_request_to_insert_exprs(database, request)?;
let futures = exprs
.iter()
.map(|e| self.handle_insert(e))
@@ -108,7 +108,7 @@ impl PrometheusProtocolHandler for Instance {
})?;
}
Mode::Distributed(_) => {
- let inserts = prometheus::write_request_to_insert_reqs(request)?;
+ let inserts = prometheus::write_request_to_insert_reqs(database, request)?;
self.dist_insert(inserts)
.await
@@ -122,11 +122,11 @@ impl PrometheusProtocolHandler for Instance {
Ok(())
}
- async fn read(&self, request: ReadRequest) -> ServerResult<PrometheusResponse> {
+ async fn read(&self, database: &str, request: ReadRequest) -> ServerResult<PrometheusResponse> {
let response_type = negotiate_response_type(&request.accepted_response_types)?;
// TODO(dennis): use read_hints to speedup query if possible
- let results = handle_remote_queries(&self.database(), &request.queries).await?;
+ let results = handle_remote_queries(&self.database(database), &request.queries).await?;
match response_type {
ResponseType::Samples => {
@@ -165,6 +165,7 @@ mod tests {
use api::prometheus::remote::{
label_matcher::Type as MatcherType, Label, LabelMatcher, Sample,
};
+ use api::v1::CreateDatabaseExpr;
use super::*;
use crate::tests;
@@ -179,7 +180,16 @@ mod tests {
..Default::default()
};
- instance.write(write_request).await.unwrap();
+ let db = "prometheus";
+
+ instance
+ .handle_create_database(CreateDatabaseExpr {
+ database_name: db.to_string(),
+ })
+ .await
+ .unwrap();
+
+ instance.write(db, write_request).await.unwrap();
let read_request = ReadRequest {
queries: vec![
@@ -214,7 +224,7 @@ mod tests {
..Default::default()
};
- let resp = instance.read(read_request).await.unwrap();
+ let resp = instance.read(db, read_request).await.unwrap();
assert_eq!(resp.content_type, "application/x-protobuf");
assert_eq!(resp.content_encoding, "snappy");
let body = prometheus::snappy_decompress(&resp.body).unwrap();
diff --git a/src/object-store/src/lib.rs b/src/object-store/src/lib.rs
index 1b645695f0fb..03e47feb3752 100644
--- a/src/object-store/src/lib.rs
+++ b/src/object-store/src/lib.rs
@@ -1,5 +1,5 @@
pub use opendal::{
- io_util::SeekableReader, services, Accessor, DirEntry, DirStreamer, Layer, Object,
+ io_util::SeekableReader, layers, services, Accessor, DirEntry, DirStreamer, Layer, Object,
ObjectMetadata, ObjectMode, Operator as ObjectStore,
};
pub mod backend;
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index ab28bf798e12..ae360262d14c 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -329,7 +329,6 @@ impl HttpServer {
router = router.nest(&format!("/{}/opentsdb", HTTP_API_VERSION), opentsdb_router);
}
- // TODO(fys): Creating influxdb's database when we can create greptime schema.
if let Some(influxdb_handler) = self.influxdb_handler.clone() {
let influxdb_router =
Router::with_state(influxdb_handler).route("/write", routing::post(influxdb_write));
diff --git a/src/servers/src/http/influxdb.rs b/src/servers/src/http/influxdb.rs
index cef1716b7ac5..9a561fac92d1 100644
--- a/src/servers/src/http/influxdb.rs
+++ b/src/servers/src/http/influxdb.rs
@@ -2,6 +2,7 @@ use std::collections::HashMap;
use axum::extract::{Query, State};
use axum::http::StatusCode;
+use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_grpc::writer::Precision;
use crate::error::Result;
@@ -12,14 +13,22 @@ use crate::query_handler::InfluxdbLineProtocolHandlerRef;
#[axum_macros::debug_handler]
pub async fn influxdb_write(
State(handler): State<InfluxdbLineProtocolHandlerRef>,
- Query(params): Query<HashMap<String, String>>,
+ Query(mut params): Query<HashMap<String, String>>,
lines: String,
) -> Result<(StatusCode, ())> {
+ let db = params
+ .remove("db")
+ .unwrap_or_else(|| DEFAULT_SCHEMA_NAME.to_string());
+
let precision = params
.get("precision")
.map(|val| parse_time_precision(val))
.transpose()?;
- let request = InfluxdbRequest { precision, lines };
+ let request = InfluxdbRequest {
+ precision,
+ lines,
+ db,
+ };
handler.exec(&request).await?;
Ok((StatusCode::NO_CONTENT, ()))
}
diff --git a/src/servers/src/http/prometheus.rs b/src/servers/src/http/prometheus.rs
index f740eb322f30..e286bcf75c35 100644
--- a/src/servers/src/http/prometheus.rs
+++ b/src/servers/src/http/prometheus.rs
@@ -1,24 +1,43 @@
use api::prometheus::remote::{ReadRequest, WriteRequest};
-use axum::extract::{RawBody, State};
+use axum::extract::{Query, RawBody, State};
use axum::http::header;
use axum::http::StatusCode;
use axum::response::IntoResponse;
+use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use hyper::Body;
use prost::Message;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
use snafu::prelude::*;
use crate::error::{self, Result};
use crate::prometheus::snappy_decompress;
use crate::query_handler::{PrometheusProtocolHandlerRef, PrometheusResponse};
+#[derive(Debug, Serialize, Deserialize, JsonSchema)]
+pub struct DatabaseQuery {
+ pub db: Option<String>,
+}
+
+impl Default for DatabaseQuery {
+ fn default() -> DatabaseQuery {
+ Self {
+ db: Some(DEFAULT_SCHEMA_NAME.to_string()),
+ }
+ }
+}
+
#[axum_macros::debug_handler]
pub async fn remote_write(
State(handler): State<PrometheusProtocolHandlerRef>,
+ Query(params): Query<DatabaseQuery>,
RawBody(body): RawBody,
) -> Result<(StatusCode, ())> {
let request = decode_remote_write_request(body).await?;
- handler.write(request).await?;
+ handler
+ .write(params.db.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME), request)
+ .await?;
Ok((StatusCode::NO_CONTENT, ()))
}
@@ -39,11 +58,14 @@ impl IntoResponse for PrometheusResponse {
#[axum_macros::debug_handler]
pub async fn remote_read(
State(handler): State<PrometheusProtocolHandlerRef>,
+ Query(params): Query<DatabaseQuery>,
RawBody(body): RawBody,
) -> Result<PrometheusResponse> {
let request = decode_remote_read_request(body).await?;
- handler.read(request).await
+ handler
+ .read(params.db.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME), request)
+ .await
}
async fn decode_remote_write_request(body: Body) -> Result<WriteRequest> {
diff --git a/src/servers/src/influxdb.rs b/src/servers/src/influxdb.rs
index 47ced111224c..d09029bbc997 100644
--- a/src/servers/src/influxdb.rs
+++ b/src/servers/src/influxdb.rs
@@ -4,7 +4,6 @@ use api::v1::{
insert_expr::{self, Expr},
InsertExpr,
};
-use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_grpc::writer::{LinesWriter, Precision};
use influxdb_line_protocol::{parse_lines, FieldValue};
use snafu::ResultExt;
@@ -18,6 +17,7 @@ pub const DEFAULT_TIME_PRECISION: Precision = Precision::NANOSECOND;
pub struct InfluxdbRequest {
pub precision: Option<Precision>,
+ pub db: String,
pub lines: String,
}
@@ -32,12 +32,13 @@ impl TryFrom<&InfluxdbRequest> for Vec<InsertRequest> {
.context(InfluxdbLineProtocolSnafu)?;
let line_len = lines.len();
let mut writers: HashMap<TableName, LineWriter> = HashMap::new();
+ let db = &value.db;
for line in lines {
let table_name = line.series.measurement;
let writer = writers
.entry(table_name.to_string())
- .or_insert_with(|| LineWriter::with_lines(table_name, line_len));
+ .or_insert_with(|| LineWriter::with_lines(db, table_name, line_len));
let tags = line.series.tag_set;
if let Some(tags) = tags {
@@ -81,8 +82,7 @@ impl TryFrom<&InfluxdbRequest> for Vec<InsertExpr> {
type Error = Error;
fn try_from(value: &InfluxdbRequest) -> Result<Self, Self::Error> {
- // InfluxDB uses default catalog name and schema name
- let schema_name = DEFAULT_SCHEMA_NAME.to_string();
+ let schema_name = value.db.to_string();
let mut writers: HashMap<TableName, LinesWriter> = HashMap::new();
let lines = parse_lines(&value.lines)
@@ -192,12 +192,14 @@ monitor2,host=host3 cpu=66.5 1663840496100023102
monitor2,host=host4 cpu=66.3,memory=1029 1663840496400340003";
let influxdb_req = &InfluxdbRequest {
+ db: "influxdb".to_string(),
precision: None,
lines: lines.to_string(),
};
let insert_reqs: Vec<InsertRequest> = influxdb_req.try_into().unwrap();
for insert_req in insert_reqs {
+ assert_eq!("influxdb", insert_req.schema_name);
match &insert_req.table_name[..] {
"monitor1" => assert_table_1(&insert_req),
"monitor2" => assert_table_2(&insert_req),
@@ -216,6 +218,7 @@ monitor2,host=host3 cpu=66.5 1663840496100023102
monitor2,host=host4 cpu=66.3,memory=1029 1663840496400340003";
let influxdb_req = &InfluxdbRequest {
+ db: "public".to_string(),
precision: None,
lines: lines.to_string(),
};
@@ -225,6 +228,7 @@ monitor2,host=host4 cpu=66.3,memory=1029 1663840496400340003";
assert_eq!(2, insert_exprs.len());
for expr in insert_exprs {
+ assert_eq!("public", expr.schema_name);
let values = match expr.expr.unwrap() {
Expr::Values(vals) => vals,
Expr::Sql(_) => panic!(),
diff --git a/src/servers/src/line_writer.rs b/src/servers/src/line_writer.rs
index ed6b6c5ab4e0..a2196b45b6da 100644
--- a/src/servers/src/line_writer.rs
+++ b/src/servers/src/line_writer.rs
@@ -1,6 +1,6 @@
use std::collections::HashMap;
-use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_grpc::writer::{to_ms_ts, Precision};
use common_time::{timestamp::TimeUnit::Millisecond, Timestamp};
use datatypes::{
@@ -15,6 +15,7 @@ type ColumnLen = usize;
type ColumnName = String;
pub struct LineWriter {
+ db: String,
table_name: String,
expected_rows: usize,
current_rows: usize,
@@ -22,8 +23,9 @@ pub struct LineWriter {
}
impl LineWriter {
- pub fn with_lines(table_name: impl Into<String>, lines: usize) -> Self {
+ pub fn with_lines(db: impl Into<String>, table_name: impl Into<String>, lines: usize) -> Self {
Self {
+ db: db.into(),
table_name: table_name.into(),
expected_rows: lines,
current_rows: 0,
@@ -122,8 +124,7 @@ impl LineWriter {
.collect();
InsertRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
- // TODO(dennis): supports database
- schema_name: DEFAULT_SCHEMA_NAME.to_string(),
+ schema_name: self.db,
table_name: self.table_name,
columns_values,
}
@@ -134,6 +135,7 @@ impl LineWriter {
mod tests {
use std::sync::Arc;
+ use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_time::Timestamp;
use datatypes::{value::Value, vectors::Vector};
@@ -141,7 +143,7 @@ mod tests {
#[test]
fn test_writer() {
- let mut writer = LineWriter::with_lines("demo".to_string(), 4);
+ let mut writer = LineWriter::with_lines(DEFAULT_SCHEMA_NAME, "demo".to_string(), 4);
writer.write_ts("ts", (1665893727685, Precision::MILLISECOND));
writer.write_tag("host", "host-1");
writer.write_i64("memory", 10_i64);
@@ -162,6 +164,7 @@ mod tests {
let insert_request = writer.finish();
assert_eq!("demo", insert_request.table_name);
+ assert_eq!(DEFAULT_SCHEMA_NAME, insert_request.schema_name);
let columns = insert_request.columns_values;
assert_eq!(5, columns.len());
diff --git a/src/servers/src/opentsdb/codec.rs b/src/servers/src/opentsdb/codec.rs
index aa743324c6c6..cd7f2ea2aa9a 100644
--- a/src/servers/src/opentsdb/codec.rs
+++ b/src/servers/src/opentsdb/codec.rs
@@ -117,7 +117,7 @@ impl DataPoint {
}
pub fn as_insert_request(&self) -> InsertRequest {
- let mut line_writer = LineWriter::with_lines(self.metric.clone(), 1);
+ let mut line_writer = LineWriter::with_lines(DEFAULT_SCHEMA_NAME, self.metric.clone(), 1);
line_writer.write_ts(
OPENTSDB_TIMESTAMP_COLUMN_NAME,
(self.ts_millis(), Precision::MILLISECOND),
diff --git a/src/servers/src/prometheus.rs b/src/servers/src/prometheus.rs
index 1070aba65eba..4dd642dfb507 100644
--- a/src/servers/src/prometheus.rs
+++ b/src/servers/src/prometheus.rs
@@ -11,7 +11,6 @@ use api::v1::{
codec::SelectResult, column, column::SemanticType, insert_expr, Column, ColumnDataType,
InsertExpr,
};
-use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_grpc::writer::Precision::MILLISECOND;
use openmetrics_parser::{MetricsExposition, PrometheusType, PrometheusValue};
use snafu::{OptionExt, ResultExt};
@@ -32,7 +31,7 @@ pub struct Metrics {
/// Generate a sql from a remote request query
/// TODO(dennis): maybe use logical plan in future to prevent sql injection
-pub fn query_to_sql(q: &Query) -> Result<(String, String)> {
+pub fn query_to_sql(db: &str, q: &Query) -> Result<(String, String)> {
let start_timestamp_ms = q.start_timestamp_ms;
let end_timestamp_ms = q.end_timestamp_ms;
@@ -93,8 +92,8 @@ pub fn query_to_sql(q: &Query) -> Result<(String, String)> {
Ok((
table_name.to_string(),
format!(
- "select * from {} where {} order by {}",
- table_name, conditions, TIMESTAMP_COLUMN_NAME,
+ "select * from {}.{} where {} order by {}",
+ db, table_name, conditions, TIMESTAMP_COLUMN_NAME,
),
))
}
@@ -280,16 +279,19 @@ pub fn select_result_to_timeseries(
}
/// Cast a remote write request into InsertRequest
-pub fn write_request_to_insert_reqs(mut request: WriteRequest) -> Result<Vec<InsertRequest>> {
+pub fn write_request_to_insert_reqs(
+ db: &str,
+ mut request: WriteRequest,
+) -> Result<Vec<InsertRequest>> {
let timeseries = std::mem::take(&mut request.timeseries);
timeseries
.into_iter()
- .map(timeseries_to_insert_request)
+ .map(|timeseries| timeseries_to_insert_request(db, timeseries))
.collect()
}
-fn timeseries_to_insert_request(mut timeseries: TimeSeries) -> Result<InsertRequest> {
+fn timeseries_to_insert_request(db: &str, mut timeseries: TimeSeries) -> Result<InsertRequest> {
// TODO(dennis): save exemplars into a column
let labels = std::mem::take(&mut timeseries.labels);
let samples = std::mem::take(&mut timeseries.samples);
@@ -306,7 +308,7 @@ fn timeseries_to_insert_request(mut timeseries: TimeSeries) -> Result<InsertRequ
})?;
let row_count = samples.len();
- let mut line_writer = LineWriter::with_lines(table_name, row_count);
+ let mut line_writer = LineWriter::with_lines(db, table_name, row_count);
for sample in samples {
let ts_millis = sample.timestamp;
@@ -329,18 +331,21 @@ fn timeseries_to_insert_request(mut timeseries: TimeSeries) -> Result<InsertRequ
// TODO(fys): it will remove in the future.
/// Cast a remote write request into gRPC's InsertExpr.
-pub fn write_request_to_insert_exprs(mut request: WriteRequest) -> Result<Vec<InsertExpr>> {
+pub fn write_request_to_insert_exprs(
+ database: &str,
+ mut request: WriteRequest,
+) -> Result<Vec<InsertExpr>> {
let timeseries = std::mem::take(&mut request.timeseries);
timeseries
.into_iter()
- .map(timeseries_to_insert_expr)
+ .map(|timeseries| timeseries_to_insert_expr(database, timeseries))
.collect()
}
// TODO(fys): it will remove in the future.
-fn timeseries_to_insert_expr(mut timeseries: TimeSeries) -> Result<InsertExpr> {
- let schema_name = DEFAULT_SCHEMA_NAME.to_string();
+fn timeseries_to_insert_expr(database: &str, mut timeseries: TimeSeries) -> Result<InsertExpr> {
+ let schema_name = database.to_string();
// TODO(dennis): save exemplars into a column
let labels = std::mem::take(&mut timeseries.labels);
@@ -518,7 +523,7 @@ mod tests {
matchers: vec![],
..Default::default()
};
- let err = query_to_sql(&q).unwrap_err();
+ let err = query_to_sql("public", &q).unwrap_err();
assert!(matches!(err, error::Error::InvalidPromRemoteRequest { .. }));
let q = Query {
@@ -531,9 +536,9 @@ mod tests {
}],
..Default::default()
};
- let (table, sql) = query_to_sql(&q).unwrap();
+ let (table, sql) = query_to_sql("public", &q).unwrap();
assert_eq!("test", table);
- assert_eq!("select * from test where greptime_timestamp>=1000 AND greptime_timestamp<=2000 order by greptime_timestamp", sql);
+ assert_eq!("select * from public.test where greptime_timestamp>=1000 AND greptime_timestamp<=2000 order by greptime_timestamp", sql);
let q = Query {
start_timestamp_ms: 1000,
@@ -557,9 +562,9 @@ mod tests {
],
..Default::default()
};
- let (table, sql) = query_to_sql(&q).unwrap();
+ let (table, sql) = query_to_sql("public", &q).unwrap();
assert_eq!("test", table);
- assert_eq!("select * from test where greptime_timestamp>=1000 AND greptime_timestamp<=2000 AND job~'*prom*' AND instance!='localhost' order by greptime_timestamp", sql);
+ assert_eq!("select * from public.test where greptime_timestamp>=1000 AND greptime_timestamp<=2000 AND job~'*prom*' AND instance!='localhost' order by greptime_timestamp", sql);
}
#[test]
@@ -569,11 +574,12 @@ mod tests {
..Default::default()
};
- let reqs = write_request_to_insert_reqs(write_request).unwrap();
+ let reqs = write_request_to_insert_reqs("public", write_request).unwrap();
assert_eq!(3, reqs.len());
let req1 = reqs.get(0).unwrap();
+ assert_eq!("public", req1.schema_name);
assert_eq!("metric1", req1.table_name);
let columns = &req1.columns_values;
@@ -593,6 +599,7 @@ mod tests {
assert_vector(&expected, val);
let req2 = reqs.get(1).unwrap();
+ assert_eq!("public", req2.schema_name);
assert_eq!("metric2", req2.table_name);
let columns = &req2.columns_values;
@@ -616,6 +623,7 @@ mod tests {
assert_vector(&expected, val);
let req3 = reqs.get(2).unwrap();
+ assert_eq!("public", req3.schema_name);
assert_eq!("metric3", req3.table_name);
let columns = &req3.columns_values;
@@ -654,8 +662,11 @@ mod tests {
..Default::default()
};
- let exprs = write_request_to_insert_exprs(write_request).unwrap();
+ let exprs = write_request_to_insert_exprs("prometheus", write_request).unwrap();
assert_eq!(3, exprs.len());
+ assert_eq!("prometheus", exprs[0].schema_name);
+ assert_eq!("prometheus", exprs[1].schema_name);
+ assert_eq!("prometheus", exprs[2].schema_name);
assert_eq!("metric1", exprs[0].table_name);
assert_eq!("metric2", exprs[1].table_name);
assert_eq!("metric3", exprs[2].table_name);
diff --git a/src/servers/src/query_handler.rs b/src/servers/src/query_handler.rs
index 46b194169ba4..e466f612141e 100644
--- a/src/servers/src/query_handler.rs
+++ b/src/servers/src/query_handler.rs
@@ -67,9 +67,9 @@ pub struct PrometheusResponse {
#[async_trait]
pub trait PrometheusProtocolHandler {
/// Handling prometheus remote write requests
- async fn write(&self, request: WriteRequest) -> Result<()>;
+ async fn write(&self, database: &str, request: WriteRequest) -> Result<()>;
/// Handling prometheus remote read requests
- async fn read(&self, request: ReadRequest) -> Result<PrometheusResponse>;
+ async fn read(&self, database: &str, request: ReadRequest) -> Result<PrometheusResponse>;
/// Handling push gateway requests
async fn ingest_metrics(&self, metrics: Metrics) -> Result<()>;
}
diff --git a/src/servers/tests/http/influxdb_test.rs b/src/servers/tests/http/influxdb_test.rs
index fe75c5bae526..e86f9b1cd246 100644
--- a/src/servers/tests/http/influxdb_test.rs
+++ b/src/servers/tests/http/influxdb_test.rs
@@ -12,7 +12,7 @@ use servers::query_handler::{InfluxdbLineProtocolHandler, SqlQueryHandler};
use tokio::sync::mpsc;
struct DummyInstance {
- tx: mpsc::Sender<String>,
+ tx: mpsc::Sender<(String, String)>,
}
#[async_trait]
@@ -21,7 +21,7 @@ impl InfluxdbLineProtocolHandler for DummyInstance {
let exprs: Vec<InsertExpr> = request.try_into()?;
for expr in exprs {
- let _ = self.tx.send(expr.table_name).await;
+ let _ = self.tx.send((expr.schema_name, expr.table_name)).await;
}
Ok(())
@@ -43,7 +43,7 @@ impl SqlQueryHandler for DummyInstance {
}
}
-fn make_test_app(tx: mpsc::Sender<String>) -> Router {
+fn make_test_app(tx: mpsc::Sender<(String, String)>) -> Router {
let instance = Arc::new(DummyInstance { tx });
let mut server = HttpServer::new(instance.clone());
server.set_influxdb_handler(instance);
@@ -66,6 +66,14 @@ async fn test_influxdb_write() {
assert_eq!(result.status(), 204);
assert!(result.text().await.is_empty());
+ let result = client
+ .post("/v1/influxdb/write?db=influxdb")
+ .body("monitor,host=host1 cpu=1.2 1664370459457010101")
+ .send()
+ .await;
+ assert_eq!(result.status(), 204);
+ assert!(result.text().await.is_empty());
+
// bad request
let result = client
.post("/v1/influxdb/write")
@@ -79,5 +87,11 @@ async fn test_influxdb_write() {
while let Ok(s) = rx.try_recv() {
metrics.push(s);
}
- assert_eq!(metrics, vec!["monitor".to_string()]);
+ assert_eq!(
+ metrics,
+ vec![
+ ("public".to_string(), "monitor".to_string()),
+ ("influxdb".to_string(), "monitor".to_string())
+ ]
+ );
}
diff --git a/src/servers/tests/http/prometheus_test.rs b/src/servers/tests/http/prometheus_test.rs
index 3415110be745..ce85c56c8ca1 100644
--- a/src/servers/tests/http/prometheus_test.rs
+++ b/src/servers/tests/http/prometheus_test.rs
@@ -17,18 +17,24 @@ use servers::query_handler::{PrometheusProtocolHandler, PrometheusResponse, SqlQ
use tokio::sync::mpsc;
struct DummyInstance {
- tx: mpsc::Sender<Vec<u8>>,
+ tx: mpsc::Sender<(String, Vec<u8>)>,
}
#[async_trait]
impl PrometheusProtocolHandler for DummyInstance {
- async fn write(&self, request: WriteRequest) -> Result<()> {
- let _ = self.tx.send(request.encode_to_vec()).await;
+ async fn write(&self, db: &str, request: WriteRequest) -> Result<()> {
+ let _ = self
+ .tx
+ .send((db.to_string(), request.encode_to_vec()))
+ .await;
Ok(())
}
- async fn read(&self, request: ReadRequest) -> Result<PrometheusResponse> {
- let _ = self.tx.send(request.encode_to_vec()).await;
+ async fn read(&self, db: &str, request: ReadRequest) -> Result<PrometheusResponse> {
+ let _ = self
+ .tx
+ .send((db.to_string(), request.encode_to_vec()))
+ .await;
let response = ReadResponse {
results: vec![QueryResult {
@@ -63,7 +69,7 @@ impl SqlQueryHandler for DummyInstance {
}
}
-fn make_test_app(tx: mpsc::Sender<Vec<u8>>) -> Router {
+fn make_test_app(tx: mpsc::Sender<(String, Vec<u8>)>) -> Router {
let instance = Arc::new(DummyInstance { tx });
let mut server = HttpServer::new(instance.clone());
server.set_prom_handler(instance);
@@ -82,6 +88,7 @@ async fn test_prometheus_remote_write_read() {
..Default::default()
};
+ // Write to public database
let result = client
.post("/v1/prometheus/write")
.body(snappy_compress(&write_request.clone().encode_to_vec()[..]).unwrap())
@@ -89,6 +96,14 @@ async fn test_prometheus_remote_write_read() {
.await;
assert_eq!(result.status(), 204);
assert!(result.text().await.is_empty());
+ // Write to prometheus database
+ let result = client
+ .post("/v1/prometheus/write?db=prometheus")
+ .body(snappy_compress(&write_request.clone().encode_to_vec()[..]).unwrap())
+ .send()
+ .await;
+ assert_eq!(result.status(), 204);
+ assert!(result.text().await.is_empty());
let read_request = ReadRequest {
queries: vec![Query {
@@ -104,8 +119,9 @@ async fn test_prometheus_remote_write_read() {
..Default::default()
};
+ // Read from prometheus database
let mut result = client
- .post("/v1/prometheus/read")
+ .post("/v1/prometheus/read?db=prometheus")
.body(snappy_compress(&read_request.clone().encode_to_vec()[..]).unwrap())
.send()
.await;
@@ -127,16 +143,41 @@ async fn test_prometheus_remote_write_read() {
prometheus::mock_timeseries()
);
- let mut requests = vec![];
+ // Read from public database
+ let result = client
+ .post("/v1/prometheus/read")
+ .body(snappy_compress(&read_request.clone().encode_to_vec()[..]).unwrap())
+ .send()
+ .await;
+ assert_eq!(result.status(), 200);
+
+ let mut requests: Vec<(String, Vec<u8>)> = vec![];
while let Ok(s) = rx.try_recv() {
requests.push(s);
}
- assert_eq!(2, requests.len());
+ assert_eq!(4, requests.len());
+
+ assert_eq!("public", requests[0].0);
+ assert_eq!("prometheus", requests[1].0);
+ assert_eq!("prometheus", requests[2].0);
+ assert_eq!("public", requests[3].0);
assert_eq!(
write_request,
- WriteRequest::decode(&requests[0][..]).unwrap()
+ WriteRequest::decode(&(requests[0].1)[..]).unwrap()
+ );
+ assert_eq!(
+ write_request,
+ WriteRequest::decode(&(requests[1].1)[..]).unwrap()
+ );
+
+ assert_eq!(
+ read_request,
+ ReadRequest::decode(&(requests[2].1)[..]).unwrap()
+ );
+ assert_eq!(
+ read_request,
+ ReadRequest::decode(&(requests[3].1)[..]).unwrap()
);
- assert_eq!(read_request, ReadRequest::decode(&requests[1][..]).unwrap());
}
diff --git a/src/table-engine/src/engine.rs b/src/table-engine/src/engine.rs
index da0fe797f215..8ebbbf2938dc 100644
--- a/src/table-engine/src/engine.rs
+++ b/src/table-engine/src/engine.rs
@@ -3,6 +3,7 @@ use std::sync::Arc;
use std::sync::RwLock;
use async_trait::async_trait;
+use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::BoxedError;
use common_telemetry::logging;
use datatypes::schema::SchemaRef;
@@ -13,7 +14,7 @@ use store_api::storage::{
CreateOptions, EngineContext as StorageEngineContext, OpenOptions, RegionDescriptorBuilder,
RegionId, RowKeyDescriptor, RowKeyDescriptorBuilder, StorageEngine,
};
-use table::engine::{EngineContext, TableEngine};
+use table::engine::{EngineContext, TableEngine, TableReference};
use table::requests::{AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest};
use table::Result as TableResult;
use table::{
@@ -46,8 +47,8 @@ fn region_id(table_id: TableId, n: u32) -> RegionId {
}
#[inline]
-fn table_dir(table_name: &str) -> String {
- format!("{}/", table_name)
+fn table_dir(schema_name: &str, table_name: &str) -> String {
+ format!("{}/{}/", schema_name, table_name)
}
/// [TableEngine] implementation.
@@ -97,12 +98,16 @@ impl<S: StorageEngine> TableEngine for MitoEngine<S> {
Ok(self.inner.alter_table(ctx, req).await?)
}
- fn get_table(&self, _ctx: &EngineContext, name: &str) -> TableResult<Option<TableRef>> {
- Ok(self.inner.get_table(name))
+ fn get_table<'a>(
+ &self,
+ _ctx: &EngineContext,
+ table_ref: &'a TableReference,
+ ) -> TableResult<Option<TableRef>> {
+ Ok(self.inner.get_table(table_ref))
}
- fn table_exists(&self, _ctx: &EngineContext, name: &str) -> bool {
- self.inner.get_table(name).is_some()
+ fn table_exists<'a>(&self, _ctx: &EngineContext, table_ref: &'a TableReference) -> bool {
+ self.inner.get_table(table_ref).is_some()
}
async fn drop_table(
@@ -114,7 +119,6 @@ impl<S: StorageEngine> TableEngine for MitoEngine<S> {
}
}
-/// FIXME(dennis) impl system catalog to keep table metadata.
struct MitoEngineInner<S: StorageEngine> {
/// All tables opened by the engine.
///
@@ -243,8 +247,13 @@ impl<S: StorageEngine> MitoEngineInner<S> {
let catalog_name = &request.catalog_name;
let schema_name = &request.schema_name;
let table_name = &request.table_name;
+ let table_ref = TableReference {
+ catalog: catalog_name,
+ schema: schema_name,
+ table: table_name,
+ };
- if let Some(table) = self.get_table(table_name) {
+ if let Some(table) = self.get_table(&table_ref) {
if request.create_if_not_exists {
return Ok(table);
} else {
@@ -290,7 +299,7 @@ impl<S: StorageEngine> MitoEngineInner<S> {
let _lock = self.table_mutex.lock().await;
// Checks again, read lock should be enough since we are guarded by the mutex.
- if let Some(table) = self.get_table(table_name) {
+ if let Some(table) = self.get_table(&table_ref) {
if request.create_if_not_exists {
return Ok(table);
} else {
@@ -298,8 +307,9 @@ impl<S: StorageEngine> MitoEngineInner<S> {
}
}
+ let table_dir = table_dir(schema_name, table_name);
let opts = CreateOptions {
- parent_dir: table_dir(table_name),
+ parent_dir: table_dir.clone(),
};
let region = self
@@ -329,7 +339,14 @@ impl<S: StorageEngine> MitoEngineInner<S> {
.context(error::BuildTableInfoSnafu { table_name })?;
let table = Arc::new(
- MitoTable::create(table_name, table_info, region, self.object_store.clone()).await?,
+ MitoTable::create(
+ table_name,
+ &table_dir,
+ table_info,
+ region,
+ self.object_store.clone(),
+ )
+ .await?,
);
logging::info!("Mito engine created table: {:?}.", table.table_info());
@@ -337,19 +354,26 @@ impl<S: StorageEngine> MitoEngineInner<S> {
self.tables
.write()
.unwrap()
- .insert(table_name.clone(), table.clone());
+ .insert(table_ref.to_string(), table.clone());
Ok(table)
}
- // TODO(yingwen): Support catalog and schema name.
async fn open_table(
&self,
_ctx: &EngineContext,
request: OpenTableRequest,
) -> TableResult<Option<TableRef>> {
+ let catalog_name = &request.catalog_name;
+ let schema_name = &request.schema_name;
let table_name = &request.table_name;
- if let Some(table) = self.get_table(table_name) {
+ let table_ref = TableReference {
+ catalog: catalog_name,
+ schema: schema_name,
+ table: table_name,
+ };
+
+ if let Some(table) = self.get_table(&table_ref) {
// Table has already been opened.
return Ok(Some(table));
}
@@ -358,13 +382,14 @@ impl<S: StorageEngine> MitoEngineInner<S> {
let table = {
let _lock = self.table_mutex.lock().await;
// Checks again, read lock should be enough since we are guarded by the mutex.
- if let Some(table) = self.get_table(table_name) {
+ if let Some(table) = self.get_table(&table_ref) {
return Ok(Some(table));
}
let engine_ctx = StorageEngineContext::default();
+ let table_dir = table_dir(schema_name, table_name);
let opts = OpenOptions {
- parent_dir: table_dir(table_name),
+ parent_dir: table_dir.to_string(),
};
let table_id = request.table_id;
@@ -383,13 +408,14 @@ impl<S: StorageEngine> MitoEngineInner<S> {
Some(region) => region,
};
- let table =
- Arc::new(MitoTable::open(table_name, region, self.object_store.clone()).await?);
+ let table = Arc::new(
+ MitoTable::open(table_name, &table_dir, region, self.object_store.clone()).await?,
+ );
self.tables
.write()
.unwrap()
- .insert(table_name.to_string(), table.clone());
+ .insert(table_ref.to_string(), table.clone());
Some(table as _)
};
@@ -398,14 +424,26 @@ impl<S: StorageEngine> MitoEngineInner<S> {
Ok(table)
}
- fn get_table(&self, name: &str) -> Option<TableRef> {
- self.tables.read().unwrap().get(name).cloned()
+ fn get_table<'a>(&self, table_ref: &'a TableReference) -> Option<TableRef> {
+ self.tables
+ .read()
+ .unwrap()
+ .get(&table_ref.to_string())
+ .cloned()
}
async fn alter_table(&self, _ctx: &EngineContext, req: AlterTableRequest) -> Result<TableRef> {
+ let catalog_name = req.catalog_name.as_deref().unwrap_or(DEFAULT_CATALOG_NAME);
+ let schema_name = req.schema_name.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME);
let table_name = &req.table_name.clone();
+
+ let table_ref = TableReference {
+ catalog: catalog_name,
+ schema: schema_name,
+ table: table_name,
+ };
let table = self
- .get_table(table_name)
+ .get_table(&table_ref)
.context(error::TableNotFoundSnafu { table_name })?;
logging::info!("start altering table {} with request {:?}", table_name, req);
@@ -585,8 +623,8 @@ mod tests {
#[test]
fn test_table_dir() {
- assert_eq!("test_table/", table_dir("test_table"));
- assert_eq!("demo/", table_dir("demo"));
+ assert_eq!("public/test_table/", table_dir("public", "test_table"));
+ assert_eq!("prometheus/demo/", table_dir("prometheus", "demo"));
}
#[tokio::test]
@@ -771,8 +809,8 @@ mod tests {
let ctx = EngineContext::default();
let open_req = OpenTableRequest {
- catalog_name: String::new(),
- schema_name: String::new(),
+ catalog_name: DEFAULT_CATALOG_NAME.to_string(),
+ schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: test_util::TABLE_NAME.to_string(),
// the test table id is 1
table_id: 1,
diff --git a/src/table-engine/src/table.rs b/src/table-engine/src/table.rs
index b61722ea633c..0e1c9b2003e3 100644
--- a/src/table-engine/src/table.rs
+++ b/src/table-engine/src/table.rs
@@ -41,8 +41,8 @@ use crate::manifest::action::*;
use crate::manifest::TableManifest;
#[inline]
-fn table_manifest_dir(table_name: &str) -> String {
- format!("{}/manifest/", table_name)
+fn table_manifest_dir(table_dir: &str) -> String {
+ format!("{}/manifest/", table_dir)
}
/// [Table] implementation.
@@ -341,11 +341,12 @@ impl<R: Region> MitoTable<R> {
pub async fn create(
table_name: &str,
+ table_dir: &str,
table_info: TableInfo,
region: R,
object_store: ObjectStore,
) -> Result<MitoTable<R>> {
- let manifest = TableManifest::new(&table_manifest_dir(table_name), object_store);
+ let manifest = TableManifest::new(&table_manifest_dir(table_dir), object_store);
// TODO(dennis): save manifest version into catalog?
let _manifest_version = manifest
@@ -377,10 +378,11 @@ impl<R: Region> MitoTable<R> {
pub async fn open(
table_name: &str,
+ table_dir: &str,
region: R,
object_store: ObjectStore,
) -> Result<MitoTable<R>> {
- let manifest = TableManifest::new(&table_manifest_dir(table_name), object_store);
+ let manifest = TableManifest::new(&table_manifest_dir(table_dir), object_store);
let mut table_info = Self::recover_table_info(table_name, &manifest)
.await?
diff --git a/src/table/src/engine.rs b/src/table/src/engine.rs
index db418f2529b8..a2ed36bbc7b5 100644
--- a/src/table/src/engine.rs
+++ b/src/table/src/engine.rs
@@ -1,9 +1,23 @@
+use std::fmt::{self, Display};
use std::sync::Arc;
use crate::error::Result;
use crate::requests::{AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest};
use crate::TableRef;
+/// Represents a resolved path to a table of the form "catalog.schema.table".
+pub struct TableReference<'a> {
+ pub catalog: &'a str,
+ pub schema: &'a str,
+ pub table: &'a str,
+}
+
+impl<'a> Display for TableReference<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{}.{}.{}", self.catalog, self.schema, self.table)
+ }
+}
+
/// Table engine abstraction.
#[async_trait::async_trait]
pub trait TableEngine: Send + Sync {
@@ -37,11 +51,14 @@ pub trait TableEngine: Send + Sync {
) -> Result<TableRef>;
/// Returns the table by its name.
- fn get_table(&self, ctx: &EngineContext, name: &str) -> Result<Option<TableRef>>;
+ fn get_table<'a>(
+ &self,
+ ctx: &EngineContext,
+ table_ref: &'a TableReference,
+ ) -> Result<Option<TableRef>>;
/// Returns true when the given table exists.
- /// TODO(hl): support catalog and schema
- fn table_exists(&self, ctx: &EngineContext, name: &str) -> bool;
+ fn table_exists<'a>(&self, ctx: &EngineContext, table_ref: &'a TableReference) -> bool;
/// Drops the given table.
async fn drop_table(&self, ctx: &EngineContext, request: DropTableRequest) -> Result<()>;
@@ -52,3 +69,19 @@ pub type TableEngineRef = Arc<dyn TableEngine>;
/// Storage engine context.
#[derive(Debug, Clone, Default)]
pub struct EngineContext {}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_table_reference() {
+ let table_ref = TableReference {
+ catalog: "greptime",
+ schema: "public",
+ table: "test",
+ };
+
+ assert_eq!("greptime.public.test", table_ref.to_string());
+ }
+}
diff --git a/src/table/src/test_util/mock_engine.rs b/src/table/src/test_util/mock_engine.rs
index ff05380a6331..75c038f356e2 100644
--- a/src/table/src/test_util/mock_engine.rs
+++ b/src/table/src/test_util/mock_engine.rs
@@ -6,7 +6,7 @@ use tokio::sync::Mutex;
use crate::test_util::EmptyTable;
use crate::{
- engine::{EngineContext, TableEngine},
+ engine::{EngineContext, TableEngine, TableReference},
requests::{AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest},
Result, TableRef,
};
@@ -73,11 +73,15 @@ impl TableEngine for MockTableEngine {
unimplemented!()
}
- fn get_table(&self, _ctx: &EngineContext, _name: &str) -> Result<Option<TableRef>> {
+ fn get_table<'a>(
+ &self,
+ _ctx: &EngineContext,
+ _ref: &'a TableReference,
+ ) -> Result<Option<TableRef>> {
unimplemented!()
}
- fn table_exists(&self, _ctx: &EngineContext, _name: &str) -> bool {
+ fn table_exists<'a>(&self, _ctx: &EngineContext, _name: &'a TableReference) -> bool {
unimplemented!()
}
| fix | table and database conflicts (#491) | 8bd4a36136b995e74c29660cfbaaf9d4882766ad | 2024-01-09 10:10:22 | Yingwen | feat(mito): Init the write cache in datanode (#3100) | false |
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 69a59d0accc7..3f0990cb49ad 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -42,7 +42,7 @@ use metric_engine::engine::MetricEngine;
use mito2::config::MitoConfig;
use mito2::engine::MitoEngine;
use object_store::manager::{ObjectStoreManager, ObjectStoreManagerRef};
-use object_store::util::normalize_dir;
+use object_store::util::{join_dir, normalize_dir};
use query::QueryEngineFactory;
use servers::export_metrics::ExportMetricsTask;
use servers::grpc::{GrpcServer, GrpcServerConfig};
@@ -60,9 +60,9 @@ use tokio::sync::Notify;
use crate::config::{DatanodeOptions, RegionEngineConfig};
use crate::error::{
- CreateDirSnafu, GetMetadataSnafu, MissingKvBackendSnafu, MissingNodeIdSnafu, OpenLogStoreSnafu,
- ParseAddrSnafu, Result, RuntimeResourceSnafu, ShutdownInstanceSnafu, ShutdownServerSnafu,
- StartServerSnafu,
+ BuildMitoEngineSnafu, CreateDirSnafu, GetMetadataSnafu, MissingKvBackendSnafu,
+ MissingNodeIdSnafu, OpenLogStoreSnafu, ParseAddrSnafu, Result, RuntimeResourceSnafu,
+ ShutdownInstanceSnafu, ShutdownServerSnafu, StartServerSnafu,
};
use crate::event_listener::{
new_region_server_event_channel, NoopRegionServerEventListener, RegionServerEventListenerRef,
@@ -458,20 +458,33 @@ impl DatanodeBuilder {
async fn build_mito_engine(
opts: &DatanodeOptions,
object_store_manager: ObjectStoreManagerRef,
- config: MitoConfig,
+ mut config: MitoConfig,
) -> Result<MitoEngine> {
+ // Sets write cache path if it is empty.
+ if config.experimental_write_cache_path.is_empty() {
+ config.experimental_write_cache_path = join_dir(&opts.storage.data_home, "write_cache");
+ info!(
+ "Sets write cache path to {}",
+ config.experimental_write_cache_path
+ );
+ }
+
let mito_engine = match &opts.wal {
WalConfig::RaftEngine(raft_engine_config) => MitoEngine::new(
config,
Self::build_raft_engine_log_store(&opts.storage.data_home, raft_engine_config)
.await?,
object_store_manager,
- ),
+ )
+ .await
+ .context(BuildMitoEngineSnafu)?,
WalConfig::Kafka(kafka_config) => MitoEngine::new(
config,
Self::build_kafka_log_store(kafka_config).await?,
object_store_manager,
- ),
+ )
+ .await
+ .context(BuildMitoEngineSnafu)?,
};
Ok(mito_engine)
}
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index 94724ffb95a0..d7873812cf5d 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -282,6 +282,12 @@ pub enum Error {
source: metric_engine::error::Error,
location: Location,
},
+
+ #[snafu(display("Failed to build mito engine"))]
+ BuildMitoEngine {
+ source: mito2::error::Error,
+ location: Location,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -352,6 +358,7 @@ impl ErrorExt for Error {
StopRegionEngine { source, .. } => source.status_code(),
FindLogicalRegions { source, .. } => source.status_code(),
+ BuildMitoEngine { source, .. } => source.status_code(),
}
}
diff --git a/src/datanode/src/store.rs b/src/datanode/src/store.rs
index 6b6eac2a09ae..e748aa5a2164 100644
--- a/src/datanode/src/store.rs
+++ b/src/datanode/src/store.rs
@@ -26,10 +26,10 @@ use std::{env, path};
use common_base::readable_size::ReadableSize;
use common_telemetry::logging::info;
-use object_store::layers::{LoggingLayer, LruCacheLayer, RetryLayer, TracingLayer};
-use object_store::services::Fs as FsBuilder;
-use object_store::util::normalize_dir;
-use object_store::{util, HttpClient, ObjectStore, ObjectStoreBuilder};
+use object_store::layers::{LruCacheLayer, RetryLayer};
+use object_store::services::Fs;
+use object_store::util::{join_dir, normalize_dir, with_instrument_layers};
+use object_store::{HttpClient, ObjectStore, ObjectStoreBuilder};
use snafu::prelude::*;
use crate::config::{ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
@@ -60,16 +60,7 @@ pub(crate) async fn new_object_store(
object_store
};
- let store = object_store
- .layer(
- LoggingLayer::default()
- // Print the expected error only in DEBUG level.
- // See https://docs.rs/opendal/latest/opendal/layers/struct.LoggingLayer.html#method.with_error_level
- .with_error_level(Some("debug"))
- .expect("input error level must be valid"),
- )
- .layer(TracingLayer)
- .layer(object_store::layers::PrometheusMetricsLayer);
+ let store = with_instrument_layers(object_store);
Ok(store)
}
@@ -114,11 +105,10 @@ async fn create_object_store_with_cache(
};
if let Some(path) = cache_path {
- let path = util::normalize_dir(path);
- let atomic_temp_dir = format!("{path}.tmp/");
+ let atomic_temp_dir = join_dir(path, ".tmp/");
clean_temp_dir(&atomic_temp_dir)?;
- let cache_store = FsBuilder::default()
- .root(&path)
+ let cache_store = Fs::default()
+ .root(path)
.atomic_write_dir(&atomic_temp_dir)
.build()
.context(error::InitBackendSnafu)?;
diff --git a/src/datanode/src/store/azblob.rs b/src/datanode/src/store/azblob.rs
index 53ea66a83d28..dedd473a72ac 100644
--- a/src/datanode/src/store/azblob.rs
+++ b/src/datanode/src/store/azblob.rs
@@ -13,7 +13,7 @@
// limitations under the License.
use common_telemetry::logging::info;
-use object_store::services::Azblob as AzureBuilder;
+use object_store::services::Azblob;
use object_store::{util, ObjectStore};
use secrecy::ExposeSecret;
use snafu::prelude::*;
@@ -30,7 +30,7 @@ pub(crate) async fn new_azblob_object_store(azblob_config: &AzblobConfig) -> Res
azblob_config.container, &root
);
- let mut builder = AzureBuilder::default();
+ let mut builder = Azblob::default();
let _ = builder
.root(&root)
.container(&azblob_config.container)
diff --git a/src/datanode/src/store/fs.rs b/src/datanode/src/store/fs.rs
index 51f1c79a833d..e3a8513c6dd2 100644
--- a/src/datanode/src/store/fs.rs
+++ b/src/datanode/src/store/fs.rs
@@ -15,7 +15,8 @@
use std::{fs, path};
use common_telemetry::logging::info;
-use object_store::services::Fs as FsBuilder;
+use object_store::services::Fs;
+use object_store::util::join_dir;
use object_store::ObjectStore;
use snafu::prelude::*;
@@ -31,10 +32,10 @@ pub(crate) async fn new_fs_object_store(
.context(error::CreateDirSnafu { dir: data_home })?;
info!("The file storage home is: {}", data_home);
- let atomic_write_dir = format!("{data_home}.tmp/");
+ let atomic_write_dir = join_dir(data_home, ".tmp/");
store::clean_temp_dir(&atomic_write_dir)?;
- let mut builder = FsBuilder::default();
+ let mut builder = Fs::default();
let _ = builder.root(data_home).atomic_write_dir(&atomic_write_dir);
let object_store = ObjectStore::new(builder)
diff --git a/src/datanode/src/store/gcs.rs b/src/datanode/src/store/gcs.rs
index 57e4a68c6d85..1bf0919b3cfe 100644
--- a/src/datanode/src/store/gcs.rs
+++ b/src/datanode/src/store/gcs.rs
@@ -13,7 +13,7 @@
// limitations under the License.
use common_telemetry::logging::info;
-use object_store::services::Gcs as GCSBuilder;
+use object_store::services::Gcs;
use object_store::{util, ObjectStore};
use secrecy::ExposeSecret;
use snafu::prelude::*;
@@ -29,7 +29,7 @@ pub(crate) async fn new_gcs_object_store(gcs_config: &GcsConfig) -> Result<Objec
gcs_config.bucket, &root
);
- let mut builder = GCSBuilder::default();
+ let mut builder = Gcs::default();
builder
.root(&root)
.bucket(&gcs_config.bucket)
diff --git a/src/datanode/src/store/oss.rs b/src/datanode/src/store/oss.rs
index 3278b83d7396..a311de9b5df2 100644
--- a/src/datanode/src/store/oss.rs
+++ b/src/datanode/src/store/oss.rs
@@ -13,7 +13,7 @@
// limitations under the License.
use common_telemetry::logging::info;
-use object_store::services::Oss as OSSBuilder;
+use object_store::services::Oss;
use object_store::{util, ObjectStore};
use secrecy::ExposeSecret;
use snafu::prelude::*;
@@ -29,7 +29,7 @@ pub(crate) async fn new_oss_object_store(oss_config: &OssConfig) -> Result<Objec
oss_config.bucket, &root
);
- let mut builder = OSSBuilder::default();
+ let mut builder = Oss::default();
let _ = builder
.root(&root)
.bucket(&oss_config.bucket)
diff --git a/src/datanode/src/store/s3.rs b/src/datanode/src/store/s3.rs
index 5c67cd6def7e..9b3e376f8461 100644
--- a/src/datanode/src/store/s3.rs
+++ b/src/datanode/src/store/s3.rs
@@ -13,7 +13,7 @@
// limitations under the License.
use common_telemetry::logging::info;
-use object_store::services::S3 as S3Builder;
+use object_store::services::S3;
use object_store::{util, ObjectStore};
use secrecy::ExposeSecret;
use snafu::prelude::*;
@@ -30,7 +30,7 @@ pub(crate) async fn new_s3_object_store(s3_config: &S3Config) -> Result<ObjectSt
s3_config.bucket, &root
);
- let mut builder = S3Builder::default();
+ let mut builder = S3::default();
let _ = builder
.root(&root)
.bucket(&s3_config.bucket)
diff --git a/src/mito2/src/access_layer.rs b/src/mito2/src/access_layer.rs
index 2e22da087c6b..a3ebf5c994bf 100644
--- a/src/mito2/src/access_layer.rs
+++ b/src/mito2/src/access_layer.rs
@@ -14,13 +14,15 @@
use std::sync::Arc;
+use object_store::services::Fs;
+use object_store::util::{join_dir, with_instrument_layers};
use object_store::ObjectStore;
use snafu::ResultExt;
use store_api::metadata::RegionMetadataRef;
use crate::cache::write_cache::SstUploadRequest;
use crate::cache::CacheManagerRef;
-use crate::error::{DeleteSstSnafu, Result};
+use crate::error::{CleanDirSnafu, DeleteSstSnafu, OpenDalSnafu, Result};
use crate::read::Source;
use crate::sst::file::{FileHandle, FileId};
use crate::sst::location;
@@ -119,3 +121,31 @@ pub(crate) struct SstWriteRequest {
pub(crate) cache_manager: CacheManagerRef,
pub(crate) storage: Option<String>,
}
+
+/// Creates a fs object store with atomic write dir.
+pub(crate) async fn new_fs_object_store(root: &str) -> Result<ObjectStore> {
+ let atomic_write_dir = join_dir(root, ".tmp/");
+ clean_dir(&atomic_write_dir).await?;
+
+ let mut builder = Fs::default();
+ builder.root(root).atomic_write_dir(&atomic_write_dir);
+ let object_store = ObjectStore::new(builder).context(OpenDalSnafu)?.finish();
+
+ // Add layers.
+ let object_store = with_instrument_layers(object_store);
+ Ok(object_store)
+}
+
+/// Clean the directory.
+async fn clean_dir(dir: &str) -> Result<()> {
+ if tokio::fs::try_exists(dir)
+ .await
+ .context(CleanDirSnafu { dir })?
+ {
+ tokio::fs::remove_dir_all(dir)
+ .await
+ .context(CleanDirSnafu { dir })?;
+ }
+
+ Ok(())
+}
diff --git a/src/mito2/src/cache.rs b/src/mito2/src/cache.rs
index 62c91b2b4155..9372d57b3a80 100644
--- a/src/mito2/src/cache.rs
+++ b/src/mito2/src/cache.rs
@@ -47,9 +47,10 @@ const PAGE_TYPE: &str = "page";
// Metrics type key for files on the local store.
const FILE_TYPE: &str = "file";
-// TODO(yingwen): Builder for cache manager.
-
/// Manages cached data for the engine.
+///
+/// All caches are disabled by default.
+#[derive(Default)]
pub struct CacheManager {
/// Cache for SST metadata.
sst_meta_cache: Option<SstMetaCache>,
@@ -58,70 +59,15 @@ pub struct CacheManager {
/// Cache for SST pages.
page_cache: Option<PageCache>,
/// A Cache for writing files to object stores.
- // TODO(yingwen): Remove this once the cache is ready.
- #[allow(unused)]
write_cache: Option<WriteCacheRef>,
}
pub type CacheManagerRef = Arc<CacheManager>;
impl CacheManager {
- /// Creates a new manager with specific cache size in bytes.
- pub fn new(
- sst_meta_cache_size: u64,
- vector_cache_size: u64,
- page_cache_size: u64,
- ) -> CacheManager {
- let sst_meta_cache = if sst_meta_cache_size == 0 {
- None
- } else {
- let cache = Cache::builder()
- .max_capacity(sst_meta_cache_size)
- .weigher(meta_cache_weight)
- .eviction_listener(|k, v, _cause| {
- let size = meta_cache_weight(&k, &v);
- CACHE_BYTES
- .with_label_values(&[SST_META_TYPE])
- .sub(size.into());
- })
- .build();
- Some(cache)
- };
- let vector_cache = if vector_cache_size == 0 {
- None
- } else {
- let cache = Cache::builder()
- .max_capacity(vector_cache_size)
- .weigher(vector_cache_weight)
- .eviction_listener(|k, v, _cause| {
- let size = vector_cache_weight(&k, &v);
- CACHE_BYTES
- .with_label_values(&[VECTOR_TYPE])
- .sub(size.into());
- })
- .build();
- Some(cache)
- };
- let page_cache = if page_cache_size == 0 {
- None
- } else {
- let cache = Cache::builder()
- .max_capacity(page_cache_size)
- .weigher(page_cache_weight)
- .eviction_listener(|k, v, _cause| {
- let size = page_cache_weight(&k, &v);
- CACHE_BYTES.with_label_values(&[PAGE_TYPE]).sub(size.into());
- })
- .build();
- Some(cache)
- };
-
- CacheManager {
- sst_meta_cache,
- vector_cache,
- page_cache,
- write_cache: None,
- }
+ /// Returns a builder to build the cache.
+ pub fn builder() -> CacheManagerBuilder {
+ CacheManagerBuilder::default()
}
/// Gets cached [ParquetMetaData].
@@ -201,6 +147,86 @@ impl CacheManager {
}
}
+/// Builder to construct a [CacheManager].
+#[derive(Default)]
+pub struct CacheManagerBuilder {
+ sst_meta_cache_size: u64,
+ vector_cache_size: u64,
+ page_cache_size: u64,
+ write_cache: Option<WriteCacheRef>,
+}
+
+impl CacheManagerBuilder {
+ /// Sets meta cache size.
+ pub fn sst_meta_cache_size(mut self, bytes: u64) -> Self {
+ self.sst_meta_cache_size = bytes;
+ self
+ }
+
+ /// Sets vector cache size.
+ pub fn vector_cache_size(mut self, bytes: u64) -> Self {
+ self.vector_cache_size = bytes;
+ self
+ }
+
+ /// Sets page cache size.
+ pub fn page_cache_size(mut self, bytes: u64) -> Self {
+ self.page_cache_size = bytes;
+ self
+ }
+
+ /// Sets write cache.
+ pub fn write_cache(mut self, cache: Option<WriteCacheRef>) -> Self {
+ self.write_cache = cache;
+ self
+ }
+
+ /// Builds the [CacheManager].
+ pub fn build(self) -> CacheManager {
+ let sst_meta_cache = (self.sst_meta_cache_size != 0).then(|| {
+ Cache::builder()
+ .max_capacity(self.sst_meta_cache_size)
+ .weigher(meta_cache_weight)
+ .eviction_listener(|k, v, _cause| {
+ let size = meta_cache_weight(&k, &v);
+ CACHE_BYTES
+ .with_label_values(&[SST_META_TYPE])
+ .sub(size.into());
+ })
+ .build()
+ });
+ let vector_cache = (self.vector_cache_size != 0).then(|| {
+ Cache::builder()
+ .max_capacity(self.vector_cache_size)
+ .weigher(vector_cache_weight)
+ .eviction_listener(|k, v, _cause| {
+ let size = vector_cache_weight(&k, &v);
+ CACHE_BYTES
+ .with_label_values(&[VECTOR_TYPE])
+ .sub(size.into());
+ })
+ .build()
+ });
+ let page_cache = (self.page_cache_size != 0).then(|| {
+ Cache::builder()
+ .max_capacity(self.page_cache_size)
+ .weigher(page_cache_weight)
+ .eviction_listener(|k, v, _cause| {
+ let size = page_cache_weight(&k, &v);
+ CACHE_BYTES.with_label_values(&[PAGE_TYPE]).sub(size.into());
+ })
+ .build()
+ });
+
+ CacheManager {
+ sst_meta_cache,
+ vector_cache,
+ page_cache,
+ write_cache: self.write_cache,
+ }
+ }
+}
+
fn meta_cache_weight(k: &SstMetaKey, v: &Arc<ParquetMetaData>) -> u32 {
// We ignore the size of `Arc`.
(k.estimated_size() + parquet_meta_size(v)) as u32
@@ -293,7 +319,7 @@ mod tests {
#[test]
fn test_disable_cache() {
- let cache = CacheManager::new(0, 0, 0);
+ let cache = CacheManager::default();
assert!(cache.sst_meta_cache.is_none());
assert!(cache.vector_cache.is_none());
assert!(cache.page_cache.is_none());
@@ -318,11 +344,13 @@ mod tests {
let pages = Arc::new(PageValue::new(Vec::new()));
cache.put_pages(key.clone(), pages);
assert!(cache.get_pages(&key).is_none());
+
+ assert!(cache.write_cache().is_none());
}
#[test]
fn test_parquet_meta_cache() {
- let cache = CacheManager::new(2000, 0, 0);
+ let cache = CacheManager::builder().sst_meta_cache_size(2000).build();
let region_id = RegionId::new(1, 1);
let file_id = FileId::random();
assert!(cache.get_parquet_meta_data(region_id, file_id).is_none());
@@ -335,7 +363,7 @@ mod tests {
#[test]
fn test_repeated_vector_cache() {
- let cache = CacheManager::new(0, 4096, 0);
+ let cache = CacheManager::builder().vector_cache_size(4096).build();
let value = Value::Int64(10);
assert!(cache.get_repeated_vector(&value).is_none());
let vector: VectorRef = Arc::new(Int64Vector::from_slice([10, 10, 10, 10]));
@@ -346,7 +374,7 @@ mod tests {
#[test]
fn test_page_cache() {
- let cache = CacheManager::new(0, 0, 1000);
+ let cache = CacheManager::builder().page_cache_size(1000).build();
let region_id = RegionId::new(1, 1);
let file_id = FileId::random();
let key = PageKey {
diff --git a/src/mito2/src/cache/file_cache.rs b/src/mito2/src/cache/file_cache.rs
index 86624a78f3f5..fb0c3ec10944 100644
--- a/src/mito2/src/cache/file_cache.rs
+++ b/src/mito2/src/cache/file_cache.rs
@@ -100,17 +100,11 @@ impl FileCache {
self.memory_index.insert(key, value).await;
}
- async fn get_reader(&self, file_path: &str) -> object_store::Result<Option<Reader>> {
- if self.local_store.is_exist(file_path).await? {
- Ok(Some(self.local_store.reader(file_path).await?))
- } else {
- Ok(None)
- }
- }
-
/// Reads a file from the cache.
pub(crate) async fn reader(&self, key: IndexKey) -> Option<Reader> {
- if !self.memory_index.contains_key(&key) {
+ // We must use `get()` to update the estimator of the cache.
+ // See https://docs.rs/moka/latest/moka/future/struct.Cache.html#method.contains_key
+ if self.memory_index.get(&key).await.is_none() {
CACHE_MISS.with_label_values(&[FILE_TYPE]).inc();
return None;
}
@@ -194,6 +188,14 @@ impl FileCache {
pub(crate) fn local_store(&self) -> ObjectStore {
self.local_store.clone()
}
+
+ async fn get_reader(&self, file_path: &str) -> object_store::Result<Option<Reader>> {
+ if self.local_store.is_exist(file_path).await? {
+ Ok(Some(self.local_store.reader(file_path).await?))
+ } else {
+ Ok(None)
+ }
+ }
}
/// Key of file cache index.
@@ -271,6 +273,10 @@ mod tests {
reader.read_to_string(&mut buf).await.unwrap();
assert_eq!("hello", buf);
+ // Get weighted size.
+ cache.memory_index.run_pending_tasks().await;
+ assert_eq!(5, cache.memory_index.weighted_size());
+
// Remove the file.
cache.remove(key).await;
assert!(cache.reader(key).await.is_none());
@@ -280,6 +286,7 @@ mod tests {
// The file also not exists.
assert!(!local_store.is_exist(&file_path).await.unwrap());
+ assert_eq!(0, cache.memory_index.weighted_size());
}
#[tokio::test]
@@ -321,6 +328,7 @@ mod tests {
let region_id = RegionId::new(2000, 0);
// Write N files.
let file_ids: Vec<_> = (0..10).map(|_| FileId::random()).collect();
+ let mut total_size = 0;
for (i, file_id) in file_ids.iter().enumerate() {
let key = (region_id, *file_id);
let file_path = cache.cache_file_path(key);
@@ -336,6 +344,7 @@ mod tests {
},
)
.await;
+ total_size += bytes.len();
}
// Recover the cache.
@@ -344,6 +353,10 @@ mod tests {
assert!(cache.reader((region_id, file_ids[0])).await.is_none());
cache.recover().await.unwrap();
+ // Check size.
+ cache.memory_index.run_pending_tasks().await;
+ assert_eq!(total_size, cache.memory_index.weighted_size() as usize);
+
for (i, file_id) in file_ids.iter().enumerate() {
let key = (region_id, *file_id);
let mut reader = cache.reader(key).await.unwrap();
diff --git a/src/mito2/src/cache/write_cache.rs b/src/mito2/src/cache/write_cache.rs
index b640ba896666..9775f3d79160 100644
--- a/src/mito2/src/cache/write_cache.rs
+++ b/src/mito2/src/cache/write_cache.rs
@@ -17,10 +17,12 @@
use std::sync::Arc;
use common_base::readable_size::ReadableSize;
+use common_telemetry::info;
use object_store::manager::ObjectStoreManagerRef;
use object_store::ObjectStore;
use store_api::metadata::RegionMetadataRef;
+use crate::access_layer::new_fs_object_store;
use crate::cache::file_cache::{FileCache, FileCacheRef};
use crate::error::Result;
use crate::read::Source;
@@ -43,20 +45,30 @@ pub type WriteCacheRef = Arc<WriteCache>;
impl WriteCache {
/// Create the cache with a `local_store` to cache files and a
/// `object_store_manager` for all object stores.
- pub fn new(
+ pub async fn new(
local_store: ObjectStore,
object_store_manager: ObjectStoreManagerRef,
cache_capacity: ReadableSize,
- ) -> Self {
- Self {
- file_cache: Arc::new(FileCache::new(local_store, cache_capacity)),
+ ) -> Result<Self> {
+ let file_cache = FileCache::new(local_store, cache_capacity);
+ file_cache.recover().await?;
+
+ Ok(Self {
+ file_cache: Arc::new(file_cache),
object_store_manager,
- }
+ })
}
- /// Recovers the write cache from local store.
- pub async fn recover(&self) -> Result<()> {
- self.file_cache.recover().await
+ /// Creates a write cache based on local fs.
+ pub async fn new_fs(
+ cache_dir: &str,
+ object_store_manager: ObjectStoreManagerRef,
+ cache_capacity: ReadableSize,
+ ) -> Result<Self> {
+ info!("Init write cache on {cache_dir}, capacity: {cache_capacity}");
+
+ let local_store = new_fs_object_store(cache_dir).await?;
+ Self::new(local_store, object_store_manager, cache_capacity).await
}
/// Writes SST to the cache and then uploads it to the remote object store.
diff --git a/src/mito2/src/config.rs b/src/mito2/src/config.rs
index 2e779b760260..b56c16addf79 100644
--- a/src/mito2/src/config.rs
+++ b/src/mito2/src/config.rs
@@ -19,6 +19,9 @@ use std::time::Duration;
use common_base::readable_size::ReadableSize;
use common_telemetry::warn;
use serde::{Deserialize, Serialize};
+use snafu::ensure;
+
+use crate::error::{InvalidConfigSnafu, Result};
/// Default max running background job.
const DEFAULT_MAX_BG_JOB: usize = 4;
@@ -67,6 +70,12 @@ pub struct MitoConfig {
pub vector_cache_size: ReadableSize,
/// Cache size for pages of SST row groups (default 512MB). Setting it to 0 to disable the cache.
pub page_cache_size: ReadableSize,
+ /// Whether to enable the experimental write cache.
+ pub enable_experimental_write_cache: bool,
+ /// Path for write cache.
+ pub experimental_write_cache_path: String,
+ /// Capacity for write cache.
+ pub experimental_write_cache_size: ReadableSize,
// Other configs:
/// Buffer size for SST writing.
@@ -95,6 +104,9 @@ impl Default for MitoConfig {
sst_meta_cache_size: ReadableSize::mb(128),
vector_cache_size: ReadableSize::mb(512),
page_cache_size: ReadableSize::mb(512),
+ enable_experimental_write_cache: false,
+ experimental_write_cache_path: String::new(),
+ experimental_write_cache_size: ReadableSize::mb(512),
sst_write_buffer_size: ReadableSize::mb(8),
scan_parallelism: divide_num_cpus(4),
parallel_scan_channel_size: DEFAULT_SCAN_CHANNEL_SIZE,
@@ -104,7 +116,9 @@ impl Default for MitoConfig {
impl MitoConfig {
/// Sanitize incorrect configurations.
- pub(crate) fn sanitize(&mut self) {
+ ///
+ /// Returns an error if there is a configuration that cannot be sanitized.
+ pub(crate) fn sanitize(&mut self) -> Result<()> {
// Use default value if `num_workers` is 0.
if self.num_workers == 0 {
self.num_workers = divide_num_cpus(2);
@@ -149,6 +163,17 @@ impl MitoConfig {
self.parallel_scan_channel_size
);
}
+
+ if self.enable_experimental_write_cache {
+ ensure!(
+ !self.experimental_write_cache_path.is_empty(),
+ InvalidConfigSnafu {
+ reason: "experimental_write_cache_path should not be empty",
+ }
+ );
+ }
+
+ Ok(())
}
}
diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs
index 2a718d88eaff..577d5131d0c1 100644
--- a/src/mito2/src/engine.rs
+++ b/src/mito2/src/engine.rs
@@ -77,16 +77,16 @@ pub struct MitoEngine {
impl MitoEngine {
/// Returns a new [MitoEngine] with specific `config`, `log_store` and `object_store`.
- pub fn new<S: LogStore>(
+ pub async fn new<S: LogStore>(
mut config: MitoConfig,
log_store: Arc<S>,
object_store_manager: ObjectStoreManagerRef,
- ) -> MitoEngine {
- config.sanitize();
+ ) -> Result<MitoEngine> {
+ config.sanitize()?;
- MitoEngine {
- inner: Arc::new(EngineInner::new(config, log_store, object_store_manager)),
- }
+ Ok(MitoEngine {
+ inner: Arc::new(EngineInner::new(config, log_store, object_store_manager).await?),
+ })
}
/// Returns true if the specific region exists.
@@ -126,16 +126,16 @@ struct EngineInner {
impl EngineInner {
/// Returns a new [EngineInner] with specific `config`, `log_store` and `object_store`.
- fn new<S: LogStore>(
+ async fn new<S: LogStore>(
config: MitoConfig,
log_store: Arc<S>,
object_store_manager: ObjectStoreManagerRef,
- ) -> EngineInner {
+ ) -> Result<EngineInner> {
let config = Arc::new(config);
- EngineInner {
- workers: WorkerGroup::start(config.clone(), log_store, object_store_manager),
+ Ok(EngineInner {
+ workers: WorkerGroup::start(config.clone(), log_store, object_store_manager).await?,
config,
- }
+ })
}
/// Stop the inner engine.
@@ -314,17 +314,17 @@ impl RegionEngine for MitoEngine {
#[cfg(any(test, feature = "test"))]
impl MitoEngine {
/// Returns a new [MitoEngine] for tests.
- pub fn new_for_test<S: LogStore>(
+ pub async fn new_for_test<S: LogStore>(
mut config: MitoConfig,
log_store: Arc<S>,
object_store_manager: ObjectStoreManagerRef,
write_buffer_manager: Option<crate::flush::WriteBufferManagerRef>,
listener: Option<crate::engine::listener::EventListenerRef>,
- ) -> MitoEngine {
- config.sanitize();
+ ) -> Result<MitoEngine> {
+ config.sanitize()?;
let config = Arc::new(config);
- MitoEngine {
+ Ok(MitoEngine {
inner: Arc::new(EngineInner {
workers: WorkerGroup::start_for_test(
config.clone(),
@@ -332,9 +332,10 @@ impl MitoEngine {
object_store_manager,
write_buffer_manager,
listener,
- ),
+ )
+ .await?,
config,
}),
- }
+ })
}
}
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index 044a4be5848d..b63068072883 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -429,35 +429,30 @@ pub enum Error {
#[snafu(display("Failed to build index applier"))]
BuildIndexApplier {
- #[snafu(source)]
source: index::inverted_index::error::Error,
location: Location,
},
#[snafu(display("Failed to convert value"))]
ConvertValue {
- #[snafu(source)]
source: datatypes::error::Error,
location: Location,
},
#[snafu(display("Failed to apply index"))]
ApplyIndex {
- #[snafu(source)]
source: index::inverted_index::error::Error,
location: Location,
},
#[snafu(display("Failed to read puffin metadata"))]
PuffinReadMetadata {
- #[snafu(source)]
source: puffin::error::Error,
location: Location,
},
#[snafu(display("Failed to read puffin blob"))]
PuffinReadBlob {
- #[snafu(source)]
source: puffin::error::Error,
location: Location,
},
@@ -467,6 +462,17 @@ pub enum Error {
blob_type: String,
location: Location,
},
+
+ #[snafu(display("Failed to clean dir {dir}"))]
+ CleanDir {
+ dir: String,
+ #[snafu(source)]
+ error: std::io::Error,
+ location: Location,
+ },
+
+ #[snafu(display("Invalid config, {reason}"))]
+ InvalidConfig { reason: String, location: Location },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -555,6 +561,8 @@ impl ErrorExt for Error {
PuffinReadMetadata { source, .. } | PuffinReadBlob { source, .. } => {
source.status_code()
}
+ CleanDir { .. } => StatusCode::Unexpected,
+ InvalidConfig { .. } => StatusCode::InvalidArguments,
}
}
diff --git a/src/mito2/src/flush.rs b/src/mito2/src/flush.rs
index 49c68e489fa3..381fd3b8c837 100644
--- a/src/mito2/src/flush.rs
+++ b/src/mito2/src/flush.rs
@@ -743,7 +743,7 @@ mod tests {
listener: WorkerListener::default(),
engine_config: Arc::new(MitoConfig::default()),
row_group_size: None,
- cache_manager: Arc::new(CacheManager::new(0, 0, 0)),
+ cache_manager: Arc::new(CacheManager::default()),
};
task.push_sender(OptionOutputTx::from(output_tx));
scheduler
diff --git a/src/mito2/src/read/projection.rs b/src/mito2/src/read/projection.rs
index c5e6feefcbe1..5f4fd67edc3a 100644
--- a/src/mito2/src/read/projection.rs
+++ b/src/mito2/src/read/projection.rs
@@ -342,7 +342,8 @@ mod tests {
assert_eq!([0, 1, 2, 3, 4], mapper.column_ids());
assert_eq!([3, 4], mapper.batch_fields());
- let cache = CacheManager::new(0, 1024, 0);
+ // With vector cache.
+ let cache = CacheManager::builder().vector_cache_size(1024).build();
let batch = new_batch(0, &[1, 2], &[(3, 3), (4, 4)], 3);
let record_batch = mapper.convert(&batch, Some(&cache)).unwrap();
let expect = "\
diff --git a/src/mito2/src/sst/parquet.rs b/src/mito2/src/sst/parquet.rs
index 20259672e3bb..5b6a088729c5 100644
--- a/src/mito2/src/sst/parquet.rs
+++ b/src/mito2/src/sst/parquet.rs
@@ -170,7 +170,12 @@ mod tests {
.unwrap()
.unwrap();
- let cache = Some(Arc::new(CacheManager::new(0, 0, 64 * 1024 * 1024)));
+ // Enable page cache.
+ let cache = Some(Arc::new(
+ CacheManager::builder()
+ .page_cache_size(64 * 1024 * 1024)
+ .build(),
+ ));
let builder = ParquetReaderBuilder::new(FILE_DIR.to_string(), handle.clone(), object_store)
.cache(cache.clone());
for _ in 0..3 {
diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs
index b2006098b6a3..73795744ffc0 100644
--- a/src/mito2/src/test_util.rs
+++ b/src/mito2/src/test_util.rs
@@ -137,6 +137,8 @@ impl TestEnv {
self.logstore = Some(logstore.clone());
self.object_store_manager = Some(object_store_manager.clone());
MitoEngine::new(config, logstore, object_store_manager)
+ .await
+ .unwrap()
}
/// Creates a new engine with specific config and existing logstore and object store manager.
@@ -145,6 +147,8 @@ impl TestEnv {
let object_store_manager = self.object_store_manager.as_ref().unwrap().clone();
MitoEngine::new(config, logstore, object_store_manager)
+ .await
+ .unwrap()
}
/// Creates a new engine with specific config and manager/listener under this env.
@@ -161,6 +165,8 @@ impl TestEnv {
self.logstore = Some(logstore.clone());
self.object_store_manager = Some(object_store_manager.clone());
MitoEngine::new_for_test(config, logstore, object_store_manager, manager, listener)
+ .await
+ .unwrap()
}
pub async fn create_engine_with_multiple_object_stores(
@@ -190,6 +196,8 @@ impl TestEnv {
self.logstore = Some(logstore.clone());
self.object_store_manager = Some(object_store_manager.clone());
MitoEngine::new_for_test(config, logstore, object_store_manager, manager, listener)
+ .await
+ .unwrap()
}
/// Reopen the engine.
@@ -201,6 +209,8 @@ impl TestEnv {
self.logstore.clone().unwrap(),
self.object_store_manager.clone().unwrap(),
)
+ .await
+ .unwrap()
}
/// Open the engine.
@@ -210,6 +220,8 @@ impl TestEnv {
self.logstore.clone().unwrap(),
self.object_store_manager.clone().unwrap(),
)
+ .await
+ .unwrap()
}
/// Only initializes the object store manager, returns the default object store.
@@ -227,6 +239,8 @@ impl TestEnv {
Arc::new(log_store),
Arc::new(object_store_manager),
)
+ .await
+ .unwrap()
}
/// Returns the log store and object store manager.
diff --git a/src/mito2/src/test_util/scheduler_util.rs b/src/mito2/src/test_util/scheduler_util.rs
index 3cf69c8456ca..445151f12f5a 100644
--- a/src/mito2/src/test_util/scheduler_util.rs
+++ b/src/mito2/src/test_util/scheduler_util.rs
@@ -66,11 +66,7 @@ impl SchedulerEnv {
) -> CompactionScheduler {
let scheduler = self.get_scheduler();
- CompactionScheduler::new(
- scheduler,
- request_sender,
- Arc::new(CacheManager::new(0, 0, 0)),
- )
+ CompactionScheduler::new(scheduler, request_sender, Arc::new(CacheManager::default()))
}
/// Creates a new flush scheduler.
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index 40bb14a401fd..09cb59aa1b17 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -42,6 +42,7 @@ use store_api::storage::RegionId;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio::sync::{mpsc, oneshot, Mutex};
+use crate::cache::write_cache::{WriteCache, WriteCacheRef};
use crate::cache::{CacheManager, CacheManagerRef};
use crate::compaction::CompactionScheduler;
use crate::config::MitoConfig;
@@ -111,20 +112,24 @@ impl WorkerGroup {
/// Starts a worker group.
///
/// The number of workers should be power of two.
- pub(crate) fn start<S: LogStore>(
+ pub(crate) async fn start<S: LogStore>(
config: Arc<MitoConfig>,
log_store: Arc<S>,
object_store_manager: ObjectStoreManagerRef,
- ) -> WorkerGroup {
+ ) -> Result<WorkerGroup> {
let write_buffer_manager = Arc::new(WriteBufferManagerImpl::new(
config.global_write_buffer_size.as_bytes() as usize,
));
let scheduler = Arc::new(LocalScheduler::new(config.max_background_jobs));
- let cache_manager = Arc::new(CacheManager::new(
- config.sst_meta_cache_size.as_bytes(),
- config.vector_cache_size.as_bytes(),
- config.page_cache_size.as_bytes(),
- ));
+ let write_cache = write_cache_from_config(&config, object_store_manager.clone()).await?;
+ let cache_manager = Arc::new(
+ CacheManager::builder()
+ .sst_meta_cache_size(config.sst_meta_cache_size.as_bytes())
+ .vector_cache_size(config.vector_cache_size.as_bytes())
+ .page_cache_size(config.page_cache_size.as_bytes())
+ .write_cache(write_cache)
+ .build(),
+ );
let workers = (0..config.num_workers)
.map(|id| {
@@ -142,11 +147,11 @@ impl WorkerGroup {
})
.collect();
- WorkerGroup {
+ Ok(WorkerGroup {
workers,
scheduler,
cache_manager,
- }
+ })
}
/// Stops the worker group.
@@ -204,24 +209,28 @@ impl WorkerGroup {
/// Starts a worker group with `write_buffer_manager` and `listener` for tests.
///
/// The number of workers should be power of two.
- pub(crate) fn start_for_test<S: LogStore>(
+ pub(crate) async fn start_for_test<S: LogStore>(
config: Arc<MitoConfig>,
log_store: Arc<S>,
object_store_manager: ObjectStoreManagerRef,
write_buffer_manager: Option<WriteBufferManagerRef>,
listener: Option<crate::engine::listener::EventListenerRef>,
- ) -> WorkerGroup {
+ ) -> Result<WorkerGroup> {
let write_buffer_manager = write_buffer_manager.unwrap_or_else(|| {
Arc::new(WriteBufferManagerImpl::new(
config.global_write_buffer_size.as_bytes() as usize,
))
});
let scheduler = Arc::new(LocalScheduler::new(config.max_background_jobs));
- let cache_manager = Arc::new(CacheManager::new(
- config.sst_meta_cache_size.as_bytes(),
- config.vector_cache_size.as_bytes(),
- config.page_cache_size.as_bytes(),
- ));
+ let write_cache = write_cache_from_config(&config, object_store_manager.clone()).await?;
+ let cache_manager = Arc::new(
+ CacheManager::builder()
+ .sst_meta_cache_size(config.sst_meta_cache_size.as_bytes())
+ .vector_cache_size(config.vector_cache_size.as_bytes())
+ .page_cache_size(config.page_cache_size.as_bytes())
+ .write_cache(write_cache)
+ .build(),
+ );
let workers = (0..config.num_workers)
.map(|id| {
@@ -239,11 +248,11 @@ impl WorkerGroup {
})
.collect();
- WorkerGroup {
+ Ok(WorkerGroup {
workers,
scheduler,
cache_manager,
- }
+ })
}
}
@@ -251,6 +260,26 @@ fn value_to_index(value: usize, num_workers: usize) -> usize {
value % num_workers
}
+async fn write_cache_from_config(
+ config: &MitoConfig,
+ object_store_manager: ObjectStoreManagerRef,
+) -> Result<Option<WriteCacheRef>> {
+ if !config.enable_experimental_write_cache {
+ return Ok(None);
+ }
+
+ // TODO(yingwen): Remove this and document the config once the write cache is ready.
+ warn!("Write cache is an experimental feature");
+
+ let cache = WriteCache::new_fs(
+ &config.experimental_write_cache_path,
+ object_store_manager,
+ config.experimental_write_cache_size,
+ )
+ .await?;
+ Ok(Some(Arc::new(cache)))
+}
+
/// Worker start config.
struct WorkerStarter<S> {
id: WorkerId,
diff --git a/src/object-store/src/util.rs b/src/object-store/src/util.rs
index 3ff71f8fce88..a357fb4fa279 100644
--- a/src/object-store/src/util.rs
+++ b/src/object-store/src/util.rs
@@ -13,8 +13,13 @@
// limitations under the License.
use futures::TryStreamExt;
+use opendal::layers::{LoggingLayer, TracingLayer};
use opendal::{Entry, Lister};
+use crate::layers::PrometheusMetricsLayer;
+use crate::ObjectStore;
+
+/// Collect all entries from the [Lister].
pub async fn collect(stream: Lister) -> Result<Vec<Entry>, opendal::Error> {
stream.try_collect::<Vec<_>>().await
}
@@ -52,6 +57,20 @@ pub fn join_path(parent: &str, child: &str) -> String {
opendal::raw::normalize_path(&output)
}
+/// Attaches instrument layers to the object store.
+pub fn with_instrument_layers(object_store: ObjectStore) -> ObjectStore {
+ object_store
+ .layer(
+ LoggingLayer::default()
+ // Print the expected error only in DEBUG level.
+ // See https://docs.rs/opendal/latest/opendal/layers/struct.LoggingLayer.html#method.with_error_level
+ .with_error_level(Some("debug"))
+ .expect("input error level must be valid"),
+ )
+ .layer(TracingLayer)
+ .layer(PrometheusMetricsLayer)
+}
+
#[cfg(test)]
mod tests {
use super::*;
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 8495c0b0f00b..ade16f8bae02 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -730,6 +730,9 @@ global_write_buffer_reject_size = "2GiB"
sst_meta_cache_size = "128MiB"
vector_cache_size = "512MiB"
page_cache_size = "512MiB"
+enable_experimental_write_cache = false
+experimental_write_cache_path = ""
+experimental_write_cache_size = "512MiB"
sst_write_buffer_size = "8MiB"
parallel_scan_channel_size = 32
| feat | Init the write cache in datanode (#3100) | 849e0b924984727cacfd086a9db5bcb236e17d14 | 2024-07-05 11:53:49 | shuiyisong | feat: delete pipeline (#4156) | false |
diff --git a/Cargo.lock b/Cargo.lock
index b67a23db9a1b..818120753264 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7720,6 +7720,7 @@ dependencies = [
"common-time",
"crossbeam-utils",
"csv",
+ "dashmap",
"datafusion",
"datafusion-common",
"datafusion-expr",
@@ -11575,6 +11576,7 @@ dependencies = [
"tokio-stream",
"tonic 0.11.0",
"tower",
+ "url",
"uuid",
"zstd 0.13.1",
]
diff --git a/src/frontend/src/instance/log_handler.rs b/src/frontend/src/instance/log_handler.rs
index 2b431d832e33..7edda5ccf130 100644
--- a/src/frontend/src/instance/log_handler.rs
+++ b/src/frontend/src/instance/log_handler.rs
@@ -19,12 +19,8 @@ use async_trait::async_trait;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
use client::Output;
use common_error::ext::BoxedError;
-use pipeline::table::{PipelineInfo, PipelineVersion};
-use pipeline::{GreptimeTransformer, Pipeline};
-use servers::error::{
- AuthSnafu, ExecuteGrpcRequestSnafu, PipelineSnafu, Result as ServerResult,
- UnsupportedDeletePipelineSnafu,
-};
+use pipeline::{GreptimeTransformer, Pipeline, PipelineInfo, PipelineVersion};
+use servers::error::{AuthSnafu, ExecuteGrpcRequestSnafu, PipelineSnafu, Result as ServerResult};
use servers::query_handler::LogHandler;
use session::context::QueryContextRef;
use snafu::ResultExt;
@@ -72,9 +68,16 @@ impl LogHandler for Instance {
.context(PipelineSnafu)
}
- async fn delete_pipeline(&self, _name: &str, _query_ctx: QueryContextRef) -> ServerResult<()> {
- // TODO(qtang): impl delete
- Err(UnsupportedDeletePipelineSnafu {}.build())
+ async fn delete_pipeline(
+ &self,
+ name: &str,
+ version: PipelineVersion,
+ ctx: QueryContextRef,
+ ) -> ServerResult<Option<()>> {
+ self.pipeline_operator
+ .delete_pipeline(name, version, ctx)
+ .await
+ .context(PipelineSnafu)
}
}
diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs
index 30828bf67c2b..a80415076195 100644
--- a/src/frontend/src/server.rs
+++ b/src/frontend/src/server.rs
@@ -22,6 +22,7 @@ use common_runtime::Builder as RuntimeBuilder;
use servers::grpc::builder::GrpcServerBuilder;
use servers::grpc::greptime_handler::GreptimeRequestHandler;
use servers::grpc::{GrpcOptions, GrpcServer, GrpcServerConfig};
+use servers::http::event::LogValidatorRef;
use servers::http::{HttpServer, HttpServerBuilder};
use servers::metrics_handler::MetricsHandler;
use servers::mysql::server::{MysqlServer, MysqlSpawnConfig, MysqlSpawnRef};
@@ -89,7 +90,8 @@ where
Some(self.instance.clone()),
);
- builder = builder.with_log_ingest_handler(self.instance.clone());
+ builder = builder
+ .with_log_ingest_handler(self.instance.clone(), self.plugins.get::<LogValidatorRef>());
if let Some(user_provider) = self.plugins.get::<UserProviderRef>() {
builder = builder.with_user_provider(user_provider);
diff --git a/src/pipeline/Cargo.toml b/src/pipeline/Cargo.toml
index 03096b47a7a1..168471d75635 100644
--- a/src/pipeline/Cargo.toml
+++ b/src/pipeline/Cargo.toml
@@ -28,6 +28,7 @@ common-telemetry.workspace = true
common-time.workspace = true
crossbeam-utils.workspace = true
csv = "1.3.0"
+dashmap.workspace = true
datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
diff --git a/src/pipeline/src/etl/mod.rs b/src/pipeline/src/etl.rs
similarity index 100%
rename from src/pipeline/src/etl/mod.rs
rename to src/pipeline/src/etl.rs
diff --git a/src/pipeline/src/etl/processor/mod.rs b/src/pipeline/src/etl/processor.rs
similarity index 100%
rename from src/pipeline/src/etl/processor/mod.rs
rename to src/pipeline/src/etl/processor.rs
diff --git a/src/pipeline/src/etl/transform/mod.rs b/src/pipeline/src/etl/transform.rs
similarity index 100%
rename from src/pipeline/src/etl/transform/mod.rs
rename to src/pipeline/src/etl/transform.rs
diff --git a/src/pipeline/src/etl/transform/transformer/mod.rs b/src/pipeline/src/etl/transform/transformer.rs
similarity index 100%
rename from src/pipeline/src/etl/transform/transformer/mod.rs
rename to src/pipeline/src/etl/transform/transformer.rs
diff --git a/src/pipeline/src/etl/transform/transformer/greptime/mod.rs b/src/pipeline/src/etl/transform/transformer/greptime.rs
similarity index 100%
rename from src/pipeline/src/etl/transform/transformer/greptime/mod.rs
rename to src/pipeline/src/etl/transform/transformer/greptime.rs
diff --git a/src/pipeline/src/etl/value/mod.rs b/src/pipeline/src/etl/value.rs
similarity index 100%
rename from src/pipeline/src/etl/value/mod.rs
rename to src/pipeline/src/etl/value.rs
diff --git a/src/pipeline/src/lib.rs b/src/pipeline/src/lib.rs
index 86ed9c7ea79b..23c9d2c488e2 100644
--- a/src/pipeline/src/lib.rs
+++ b/src/pipeline/src/lib.rs
@@ -14,8 +14,12 @@
mod etl;
mod manager;
+mod metrics;
pub use etl::transform::GreptimeTransformer;
pub use etl::value::Value;
pub use etl::{parse, Content, Pipeline};
-pub use manager::{error, pipeline_operator, table};
+pub use manager::{
+ error, pipeline_operator, table, util, PipelineInfo, PipelineRef, PipelineTableRef,
+ PipelineVersion,
+};
diff --git a/src/pipeline/src/manager.rs b/src/pipeline/src/manager.rs
new file mode 100644
index 000000000000..960197e083e8
--- /dev/null
+++ b/src/pipeline/src/manager.rs
@@ -0,0 +1,38 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use common_time::Timestamp;
+use datatypes::timestamp::TimestampNanosecond;
+
+use crate::table::PipelineTable;
+use crate::{GreptimeTransformer, Pipeline};
+
+pub mod error;
+pub mod pipeline_operator;
+pub mod table;
+pub mod util;
+
+/// Pipeline version. An optional timestamp with nanosecond precision.
+/// If the version is None, it means the latest version of the pipeline.
+/// Users can specify a version by providing an ISO 8601 formatted timestamp string.
+/// When used in a cache key, it is converted to an i64 representing the number of nanoseconds since the epoch.
+pub type PipelineVersion = Option<TimestampNanosecond>;
+
+/// Pipeline info. A tuple of timestamp and pipeline reference.
+pub type PipelineInfo = (Timestamp, PipelineRef);
+
+pub type PipelineTableRef = Arc<PipelineTable>;
+pub type PipelineRef = Arc<Pipeline<GreptimeTransformer>>;
diff --git a/src/pipeline/src/manager/error.rs b/src/pipeline/src/manager/error.rs
index ad5d8a96bebd..07332590f1f0 100644
--- a/src/pipeline/src/manager/error.rs
+++ b/src/pipeline/src/manager/error.rs
@@ -101,6 +101,13 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+
+ #[snafu(display("Invalid pipeline version format: {}", version))]
+ InvalidPipelineVersion {
+ version: String,
+ #[snafu(implicit)]
+ location: Location,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -113,9 +120,10 @@ impl ErrorExt for Error {
PipelineTableNotFound { .. } => StatusCode::TableNotFound,
InsertPipeline { source, .. } => source.status_code(),
CollectRecords { source, .. } => source.status_code(),
- PipelineNotFound { .. } | CompilePipeline { .. } | PipelineTransform { .. } => {
- StatusCode::InvalidArguments
- }
+ PipelineNotFound { .. }
+ | CompilePipeline { .. }
+ | PipelineTransform { .. }
+ | InvalidPipelineVersion { .. } => StatusCode::InvalidArguments,
BuildDfLogicalPlan { .. } => StatusCode::Internal,
ExecuteInternalStatement { source, .. } => source.status_code(),
Catalog { source, .. } => source.status_code(),
diff --git a/src/pipeline/src/manager/mod.rs b/src/pipeline/src/manager/mod.rs
deleted file mode 100644
index 95ffb5822ec3..000000000000
--- a/src/pipeline/src/manager/mod.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-pub mod error;
-pub mod pipeline_operator;
-pub mod table;
diff --git a/src/pipeline/src/manager/pipeline_operator.rs b/src/pipeline/src/manager/pipeline_operator.rs
index 5ae81a97a264..049cd80b452a 100644
--- a/src/pipeline/src/manager/pipeline_operator.rs
+++ b/src/pipeline/src/manager/pipeline_operator.rs
@@ -14,11 +14,13 @@
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
+use std::time::Instant;
use api::v1::CreateTableExpr;
use catalog::{CatalogManagerRef, RegisterSystemTableRequest};
use common_catalog::consts::{default_engine, DEFAULT_PRIVATE_SCHEMA_NAME};
use common_telemetry::info;
+use futures::FutureExt;
use operator::insert::InserterRef;
use operator::statement::StatementExecutorRef;
use query::QueryEngineRef;
@@ -27,11 +29,14 @@ use snafu::{OptionExt, ResultExt};
use table::TableRef;
use crate::error::{CatalogSnafu, CreateTableSnafu, PipelineTableNotFoundSnafu, Result};
-use crate::table::{PipelineInfo, PipelineTable, PipelineTableRef, PipelineVersion};
+use crate::manager::{PipelineInfo, PipelineTableRef, PipelineVersion};
+use crate::metrics::{
+ METRIC_PIPELINE_CREATE_HISTOGRAM, METRIC_PIPELINE_DELETE_HISTOGRAM,
+ METRIC_PIPELINE_RETRIEVE_HISTOGRAM,
+};
+use crate::table::{PipelineTable, PIPELINE_TABLE_NAME};
use crate::{GreptimeTransformer, Pipeline};
-pub const PIPELINE_TABLE_NAME: &str = "pipelines";
-
/// PipelineOperator is responsible for managing pipelines.
/// It provides the ability to:
/// - Create a pipeline table if it does not exist
@@ -50,7 +55,7 @@ pub struct PipelineOperator {
impl PipelineOperator {
/// Create a table request for the pipeline table.
- pub fn create_table_request(&self, catalog: &str) -> RegisterSystemTableRequest {
+ fn create_table_request(&self, catalog: &str) -> RegisterSystemTableRequest {
let (time_index, primary_keys, column_defs) = PipelineTable::build_pipeline_schema();
let create_table_expr = CreateTableExpr {
@@ -146,20 +151,6 @@ impl PipelineOperator {
pub fn get_pipeline_table_from_cache(&self, catalog: &str) -> Option<PipelineTableRef> {
self.tables.read().unwrap().get(catalog).cloned()
}
-
- async fn insert_and_compile(
- &self,
- ctx: QueryContextRef,
- name: &str,
- content_type: &str,
- pipeline: &str,
- ) -> Result<PipelineInfo> {
- let schema = ctx.current_schema();
- self.get_pipeline_table_from_cache(ctx.current_catalog())
- .context(PipelineTableNotFoundSnafu)?
- .insert_and_compile(&schema, name, content_type, pipeline)
- .await
- }
}
impl PipelineOperator {
@@ -189,9 +180,16 @@ impl PipelineOperator {
let schema = query_ctx.current_schema();
self.create_pipeline_table_if_not_exists(query_ctx.clone())
.await?;
+
+ let timer = Instant::now();
self.get_pipeline_table_from_cache(query_ctx.current_catalog())
.context(PipelineTableNotFoundSnafu)?
.get_pipeline(&schema, name, version)
+ .inspect(|re| {
+ METRIC_PIPELINE_RETRIEVE_HISTOGRAM
+ .with_label_values(&[&re.is_ok().to_string()])
+ .observe(timer.elapsed().as_secs_f64())
+ })
.await
}
@@ -206,7 +204,38 @@ impl PipelineOperator {
self.create_pipeline_table_if_not_exists(query_ctx.clone())
.await?;
- self.insert_and_compile(query_ctx, name, content_type, pipeline)
+ let timer = Instant::now();
+ self.get_pipeline_table_from_cache(query_ctx.current_catalog())
+ .context(PipelineTableNotFoundSnafu)?
+ .insert_and_compile(&query_ctx.current_schema(), name, content_type, pipeline)
+ .inspect(|re| {
+ METRIC_PIPELINE_CREATE_HISTOGRAM
+ .with_label_values(&[&re.is_ok().to_string()])
+ .observe(timer.elapsed().as_secs_f64())
+ })
+ .await
+ }
+
+ /// Delete a pipeline by name from pipeline table.
+ pub async fn delete_pipeline(
+ &self,
+ name: &str,
+ version: PipelineVersion,
+ query_ctx: QueryContextRef,
+ ) -> Result<Option<()>> {
+ // trigger load pipeline table
+ self.create_pipeline_table_if_not_exists(query_ctx.clone())
+ .await?;
+
+ let timer = Instant::now();
+ self.get_pipeline_table_from_cache(query_ctx.current_catalog())
+ .context(PipelineTableNotFoundSnafu)?
+ .delete_pipeline(&query_ctx.current_schema(), name, version)
+ .inspect(|re| {
+ METRIC_PIPELINE_DELETE_HISTOGRAM
+ .with_label_values(&[&re.is_ok().to_string()])
+ .observe(timer.elapsed().as_secs_f64())
+ })
.await
}
}
diff --git a/src/pipeline/src/manager/table.rs b/src/pipeline/src/manager/table.rs
index 365b3f463406..d3197123cce5 100644
--- a/src/pipeline/src/manager/table.rs
+++ b/src/pipeline/src/manager/table.rs
@@ -25,9 +25,9 @@ use common_recordbatch::util as record_util;
use common_telemetry::{debug, info};
use common_time::timestamp::{TimeUnit, Timestamp};
use datafusion::datasource::DefaultTableSource;
-use datafusion::logical_expr::{and, col, lit};
-use datafusion_common::TableReference;
-use datafusion_expr::LogicalPlanBuilder;
+use datafusion::logical_expr::col;
+use datafusion_common::{TableReference, ToDFSchema};
+use datafusion_expr::{DmlStatement, LogicalPlan as DfLogicalPlan, LogicalPlanBuilder};
use datatypes::prelude::ScalarVector;
use datatypes::timestamp::TimestampNanosecond;
use datatypes::vectors::{StringVector, TimestampNanosecondVector, Vector};
@@ -44,36 +44,25 @@ use table::TableRef;
use crate::error::{
BuildDfLogicalPlanSnafu, CastTypeSnafu, CollectRecordsSnafu, CompilePipelineSnafu,
- ExecuteInternalStatementSnafu, InsertPipelineSnafu, PipelineNotFoundSnafu, Result,
+ ExecuteInternalStatementSnafu, InsertPipelineSnafu, InvalidPipelineVersionSnafu,
+ PipelineNotFoundSnafu, Result,
};
use crate::etl::transform::GreptimeTransformer;
use crate::etl::{parse, Content, Pipeline};
+use crate::manager::{PipelineInfo, PipelineVersion};
+use crate::util::{build_plan_filter, generate_pipeline_cache_key};
-/// Pipeline version. An optional timestamp with nanosecond precision.
-/// If the version is None, it means the latest version of the pipeline.
-/// User can specify the version by providing a timestamp string formatted as iso8601.
-/// When it used in cache key, it will be converted to i64 meaning the number of nanoseconds since the epoch.
-pub type PipelineVersion = Option<TimestampNanosecond>;
-
-pub type PipelineTableRef = Arc<PipelineTable>;
-
-pub type PipelineRef = Arc<Pipeline<GreptimeTransformer>>;
-
-/// Pipeline info. A tuple of timestamp and pipeline reference.
-pub type PipelineInfo = (Timestamp, PipelineRef);
-
-pub const PIPELINE_TABLE_NAME: &str = "pipelines";
-
-pub const PIPELINE_TABLE_PIPELINE_NAME_COLUMN_NAME: &str = "name";
-pub const PIPELINE_TABLE_PIPELINE_SCHEMA_COLUMN_NAME: &str = "schema";
-pub const PIPELINE_TABLE_PIPELINE_CONTENT_TYPE_COLUMN_NAME: &str = "content_type";
-pub const PIPELINE_TABLE_PIPELINE_CONTENT_COLUMN_NAME: &str = "pipeline";
-pub const PIPELINE_TABLE_CREATED_AT_COLUMN_NAME: &str = "created_at";
+pub(crate) const PIPELINE_TABLE_NAME: &str = "pipelines";
+pub(crate) const PIPELINE_TABLE_PIPELINE_NAME_COLUMN_NAME: &str = "name";
+pub(crate) const PIPELINE_TABLE_PIPELINE_SCHEMA_COLUMN_NAME: &str = "schema";
+const PIPELINE_TABLE_PIPELINE_CONTENT_TYPE_COLUMN_NAME: &str = "content_type";
+const PIPELINE_TABLE_PIPELINE_CONTENT_COLUMN_NAME: &str = "pipeline";
+pub(crate) const PIPELINE_TABLE_CREATED_AT_COLUMN_NAME: &str = "created_at";
/// Pipeline table cache size.
-pub const PIPELINES_CACHE_SIZE: u64 = 10000;
+const PIPELINES_CACHE_SIZE: u64 = 10000;
/// Pipeline table cache time to live.
-pub const PIPELINES_CACHE_TTL: Duration = Duration::from_secs(10);
+const PIPELINES_CACHE_TTL: Duration = Duration::from_secs(10);
/// PipelineTable is a table that stores the pipeline schema and content.
/// Every catalog has its own pipeline table.
@@ -216,23 +205,6 @@ impl PipelineTable {
.map_err(|e| CompilePipelineSnafu { reason: e }.build())
}
- fn generate_pipeline_cache_key(schema: &str, name: &str, version: PipelineVersion) -> String {
- match version {
- Some(version) => format!("{}/{}/{}", schema, name, i64::from(version)),
- None => format!("{}/{}/latest", schema, name),
- }
- }
-
- fn get_compiled_pipeline_from_cache(
- &self,
- schema: &str,
- name: &str,
- version: PipelineVersion,
- ) -> Option<Arc<Pipeline<GreptimeTransformer>>> {
- self.pipelines
- .get(&Self::generate_pipeline_cache_key(schema, name, version))
- }
-
/// Insert a pipeline into the pipeline table.
async fn insert_pipeline_to_pipeline_table(
&self,
@@ -276,9 +248,8 @@ impl PipelineTable {
.context(InsertPipelineSnafu)?;
info!(
- "Inserted pipeline: {} into {} table: {}, output: {:?}.",
+ "Insert pipeline success, name: {:?}, table: {:?}, output: {:?}",
name,
- PIPELINE_TABLE_NAME,
table_info.full_table_name(),
output
);
@@ -294,15 +265,21 @@ impl PipelineTable {
name: &str,
version: PipelineVersion,
) -> Result<Arc<Pipeline<GreptimeTransformer>>> {
- if let Some(pipeline) = self.get_compiled_pipeline_from_cache(schema, name, version) {
+ if let Some(pipeline) = self
+ .pipelines
+ .get(&generate_pipeline_cache_key(schema, name, version))
+ {
return Ok(pipeline);
}
- let pipeline = self.find_pipeline_by_name(schema, name, version).await?;
+ let pipeline = self
+ .find_pipeline(schema, name, version)
+ .await?
+ .context(PipelineNotFoundSnafu { name, version })?;
let compiled_pipeline = Arc::new(Self::compile_pipeline(&pipeline.0)?);
self.pipelines.insert(
- Self::generate_pipeline_cache_key(schema, name, version),
+ generate_pipeline_cache_key(schema, name, version),
compiled_pipeline.clone(),
);
Ok(compiled_pipeline)
@@ -325,11 +302,11 @@ impl PipelineTable {
{
self.pipelines.insert(
- Self::generate_pipeline_cache_key(schema, name, None),
+ generate_pipeline_cache_key(schema, name, None),
compiled_pipeline.clone(),
);
self.pipelines.insert(
- Self::generate_pipeline_cache_key(schema, name, Some(TimestampNanosecond(version))),
+ generate_pipeline_cache_key(schema, name, Some(TimestampNanosecond(version))),
compiled_pipeline.clone(),
);
}
@@ -337,12 +314,91 @@ impl PipelineTable {
Ok((version, compiled_pipeline))
}
- async fn find_pipeline_by_name(
+ pub async fn delete_pipeline(
+ &self,
+ schema: &str,
+ name: &str,
+ version: PipelineVersion,
+ ) -> Result<Option<()>> {
+ // 0. the version is guaranteed to be non-None at the HTTP API level
+ ensure!(
+ version.is_some(),
+ InvalidPipelineVersionSnafu { version: "None" }
+ );
+
+ // 1. check pipeline exist in catalog
+ let pipeline = self.find_pipeline(schema, name, version).await?;
+ if pipeline.is_none() {
+ return Ok(None);
+ }
+
+ // 2. do delete
+ let table_info = self.table.table_info();
+ let table_name = TableReference::full(
+ table_info.catalog_name.clone(),
+ table_info.schema_name.clone(),
+ table_info.name.clone(),
+ );
+ let table_provider = Arc::new(DfTableProviderAdapter::new(self.table.clone()));
+ let table_source = Arc::new(DefaultTableSource::new(table_provider));
+
+ let df_schema = Arc::new(
+ table_info
+ .meta
+ .schema
+ .arrow_schema()
+ .clone()
+ .to_dfschema()
+ .context(BuildDfLogicalPlanSnafu)?,
+ );
+
+ // create scan plan
+ let logical_plan = LogicalPlanBuilder::scan(table_name.clone(), table_source, None)
+ .context(BuildDfLogicalPlanSnafu)?
+ .filter(build_plan_filter(schema, name, version))
+ .context(BuildDfLogicalPlanSnafu)?
+ .build()
+ .context(BuildDfLogicalPlanSnafu)?;
+
+ // create dml stmt
+ let stmt = DmlStatement::new(
+ table_name,
+ df_schema,
+ datafusion_expr::WriteOp::Delete,
+ Arc::new(logical_plan),
+ );
+
+ let plan = LogicalPlan::DfPlan(DfLogicalPlan::Dml(stmt));
+
+ let output = self
+ .query_engine
+ .execute(plan, Self::query_ctx(&table_info))
+ .await
+ .context(ExecuteInternalStatementSnafu)?;
+
+ info!(
+ "Delete pipeline success, name: {:?}, version: {:?}, table: {:?}, output: {:?}",
+ name,
+ version,
+ table_info.full_table_name(),
+ output
+ );
+
+ // remove cache with version and latest
+ self.pipelines
+ .remove(&generate_pipeline_cache_key(schema, name, version));
+ self.pipelines
+ .remove(&generate_pipeline_cache_key(schema, name, None));
+
+ Ok(Some(()))
+ }
+
+ async fn find_pipeline(
&self,
schema: &str,
name: &str,
version: PipelineVersion,
- ) -> Result<(String, TimestampNanosecond)> {
+ ) -> Result<Option<(String, TimestampNanosecond)>> {
let table_info = self.table.table_info();
let table_name = TableReference::full(
@@ -353,22 +409,10 @@ impl PipelineTable {
let table_provider = Arc::new(DfTableProviderAdapter::new(self.table.clone()));
let table_source = Arc::new(DefaultTableSource::new(table_provider));
- let schema_and_name_filter = and(
- col(PIPELINE_TABLE_PIPELINE_SCHEMA_COLUMN_NAME).eq(lit(schema)),
- col(PIPELINE_TABLE_PIPELINE_NAME_COLUMN_NAME).eq(lit(name)),
- );
- let filter = if let Some(v) = version {
- and(
- schema_and_name_filter,
- col(PIPELINE_TABLE_CREATED_AT_COLUMN_NAME).eq(lit(v.0.to_iso8601_string())),
- )
- } else {
- schema_and_name_filter
- };
let plan = LogicalPlanBuilder::scan(table_name, table_source, None)
.context(BuildDfLogicalPlanSnafu)?
- .filter(filter)
+ .filter(build_plan_filter(schema, name, version))
.context(BuildDfLogicalPlanSnafu)?
.project(vec![
col(PIPELINE_TABLE_PIPELINE_CONTENT_COLUMN_NAME),
@@ -401,8 +445,11 @@ impl PipelineTable {
.await
.context(CollectRecordsSnafu)?;
- ensure!(!records.is_empty(), PipelineNotFoundSnafu { name, version });
+ if records.is_empty() {
+ return Ok(None);
+ }
+ // limit 1
ensure!(
records.len() == 1 && records[0].num_columns() == 2,
PipelineNotFoundSnafu { name, version }
@@ -441,9 +488,9 @@ impl PipelineTable {
);
// Safety: asserted above
- Ok((
+ Ok(Some((
pipeline_content.get_data(0).unwrap().to_string(),
pipeline_created_at.get_data(0).unwrap(),
- ))
+ )))
}
}
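
The delete path above is essentially scan-filter-DML: build a filtered scan of the pipelines table, then wrap it in a DataFusion DmlStatement with WriteOp::Delete and hand it to the query engine. A condensed sketch of that plan shape, assuming the same DataFusion types used in the hunk (the Greptime LogicalPlan::DfPlan wrapper and snafu error contexts are omitted; unwrap() stands in for them):

    use std::sync::Arc;

    use datafusion_common::{DFSchemaRef, TableReference};
    use datafusion_expr::{DmlStatement, Expr, LogicalPlan, LogicalPlanBuilder, TableSource, WriteOp};

    // Illustrative only: the caller supplies the same inputs the real code
    // derives from table_info and build_plan_filter.
    fn build_delete_plan(
        table_name: TableReference,
        table_source: Arc<dyn TableSource>,
        df_schema: DFSchemaRef,
        filter: Expr,
    ) -> LogicalPlan {
        let scan = LogicalPlanBuilder::scan(table_name.clone(), table_source, None)
            .unwrap()
            .filter(filter)
            .unwrap()
            .build()
            .unwrap();
        LogicalPlan::Dml(DmlStatement::new(
            table_name,
            df_schema,
            WriteOp::Delete,
            Arc::new(scan),
        ))
    }

On success the code above also evicts both the versioned and the "latest" cache keys so a stale compiled pipeline cannot be served after deletion.
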
diff --git a/src/pipeline/src/manager/util.rs b/src/pipeline/src/manager/util.rs
new file mode 100644
index 000000000000..6133c64215d0
--- /dev/null
+++ b/src/pipeline/src/manager/util.rs
@@ -0,0 +1,98 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_time::Timestamp;
+use datafusion_expr::{and, col, lit, Expr};
+use datatypes::timestamp::TimestampNanosecond;
+
+use crate::error::{InvalidPipelineVersionSnafu, Result};
+use crate::table::{
+ PIPELINE_TABLE_CREATED_AT_COLUMN_NAME, PIPELINE_TABLE_PIPELINE_NAME_COLUMN_NAME,
+ PIPELINE_TABLE_PIPELINE_SCHEMA_COLUMN_NAME,
+};
+use crate::PipelineVersion;
+
+pub fn to_pipeline_version(version_str: Option<String>) -> Result<PipelineVersion> {
+ match version_str {
+ Some(version) => {
+ let ts = Timestamp::from_str_utc(&version)
+ .map_err(|_| InvalidPipelineVersionSnafu { version }.build())?;
+ Ok(Some(TimestampNanosecond(ts)))
+ }
+ None => Ok(None),
+ }
+}
+
+pub(crate) fn build_plan_filter(schema: &str, name: &str, version: PipelineVersion) -> Expr {
+ let schema_and_name_filter = and(
+ col(PIPELINE_TABLE_PIPELINE_SCHEMA_COLUMN_NAME).eq(lit(schema)),
+ col(PIPELINE_TABLE_PIPELINE_NAME_COLUMN_NAME).eq(lit(name)),
+ );
+ if let Some(v) = version {
+ and(
+ schema_and_name_filter,
+ col(PIPELINE_TABLE_CREATED_AT_COLUMN_NAME).eq(lit(v.0.to_iso8601_string())),
+ )
+ } else {
+ schema_and_name_filter
+ }
+}
+
+pub(crate) fn generate_pipeline_cache_key(
+ schema: &str,
+ name: &str,
+ version: PipelineVersion,
+) -> String {
+ match version {
+ Some(version) => format!("{}/{}/{}", schema, name, i64::from(version)),
+ None => format!("{}/{}/latest", schema, name),
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_to_pipeline_version() {
+ let none_result = to_pipeline_version(None);
+ assert!(none_result.is_ok());
+ assert!(none_result.unwrap().is_none());
+
+ let some_result = to_pipeline_version(Some("2023-01-01 00:00:00Z".to_string()));
+ assert!(some_result.is_ok());
+ assert_eq!(
+ some_result.unwrap(),
+ Some(TimestampNanosecond::new(1672531200000000000))
+ );
+
+ let invalid = to_pipeline_version(Some("invalid".to_string()));
+ assert!(invalid.is_err());
+ }
+
+ #[test]
+ fn test_generate_pipeline_cache_key() {
+ let schema = "test_schema";
+ let name = "test_name";
+ let latest = generate_pipeline_cache_key(schema, name, None);
+ assert_eq!(latest, "test_schema/test_name/latest");
+
+ let versioned = generate_pipeline_cache_key(
+ schema,
+ name,
+ Some(TimestampNanosecond::new(1672531200000000000)),
+ );
+ assert_eq!(versioned, "test_schema/test_name/1672531200000000000");
+ }
+}
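
Taken together, these helpers resolve a user-supplied version string into both the scan filter and the cache key for a single lookup. A hypothetical crate-internal helper showing how they compose (the function itself is illustrative, not part of the module):

    use datafusion_expr::Expr;

    use crate::error::Result;
    use crate::manager::PipelineVersion;
    use crate::util::{build_plan_filter, generate_pipeline_cache_key, to_pipeline_version};

    // Illustration only: for Some(version) the filter is equivalent to
    //   schema = $schema AND name = $name AND created_at = <version as ISO 8601>
    fn lookup_parts(schema: &str, name: &str, version_str: Option<String>) -> Result<(Expr, String)> {
        let version: PipelineVersion = to_pipeline_version(version_str)?;
        Ok((
            build_plan_filter(schema, name, version),
            generate_pipeline_cache_key(schema, name, version),
        ))
    }
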
diff --git a/src/pipeline/src/metrics.rs b/src/pipeline/src/metrics.rs
new file mode 100644
index 000000000000..280f5619d468
--- /dev/null
+++ b/src/pipeline/src/metrics.rs
@@ -0,0 +1,37 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use lazy_static::lazy_static;
+use prometheus::{register_histogram_vec, HistogramVec};
+
+lazy_static! {
+ pub static ref METRIC_PIPELINE_CREATE_HISTOGRAM: HistogramVec = register_histogram_vec!(
+ "greptime_pipeline_create_duration_seconds",
+ "Histogram of the pipeline creation duration",
+ &["success"]
+ )
+ .unwrap();
+ pub static ref METRIC_PIPELINE_DELETE_HISTOGRAM: HistogramVec = register_histogram_vec!(
+ "greptime_pipeline_delete_duration_seconds",
+ "Histogram of the pipeline deletion duration",
+ &["success"]
+ )
+ .unwrap();
+ pub static ref METRIC_PIPELINE_RETRIEVE_HISTOGRAM: HistogramVec = register_histogram_vec!(
+ "greptime_pipeline_retrieve_duration_seconds",
+ "Histogram of the pipeline retrieval duration",
+ &["success"]
+ )
+ .unwrap();
+}
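
Each histogram carries a single "success" label whose values are the strings "true" and "false", matching what is_ok().to_string() produces at the call sites above. A trivial sketch of recording into one of them (the wrapper function is invented for illustration):

    use crate::metrics::METRIC_PIPELINE_CREATE_HISTOGRAM;

    // Illustration only: one observation per operation, labelled by outcome.
    fn record_create(success: bool, elapsed_secs: f64) {
        METRIC_PIPELINE_CREATE_HISTOGRAM
            .with_label_values(&[if success { "true" } else { "false" }])
            .observe(elapsed_secs);
    }
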
diff --git a/src/pipeline/tests/gsub.rs b/src/pipeline/tests/gsub.rs
index 5d25bf188b68..f1209a6f8830 100644
--- a/src/pipeline/tests/gsub.rs
+++ b/src/pipeline/tests/gsub.rs
@@ -29,7 +29,7 @@ fn test_gsub() {
let pipeline_yaml = r#"
---
-description: Pipeline for Akamai DataStream2 Log
+description: Pipeline for Demo Log
processors:
- gsub:
diff --git a/src/pipeline/tests/pipeline.rs b/src/pipeline/tests/pipeline.rs
index ff9cad1bdea1..08f2ad38116f 100644
--- a/src/pipeline/tests/pipeline.rs
+++ b/src/pipeline/tests/pipeline.rs
@@ -81,7 +81,7 @@ fn test_complex_data() {
let pipeline_yaml = r#"
---
-description: Pipeline for Akamai DataStream2 Log
+description: Pipeline for Demo Log
processors:
- urlencoding:
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 04b6fa196ca2..80c93444158f 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -156,12 +156,6 @@ pub enum Error {
location: Location,
},
- #[snafu(display("Unsupported delete pipeline."))]
- UnsupportedDeletePipeline {
- #[snafu(implicit)]
- location: Location,
- },
-
#[snafu(display("Failed to execute script by name: {}", name))]
ExecuteScript {
name: String,
@@ -635,7 +629,6 @@ impl ErrorExt for Error {
| FileWatch { .. } => StatusCode::Internal,
UnsupportedDataType { .. } => StatusCode::Unsupported,
- UnsupportedDeletePipeline { .. } => StatusCode::Unsupported,
#[cfg(not(windows))]
UpdateJemallocMetrics { .. } => StatusCode::Internal,
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index 26e0349de275..6204908c03a7 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -36,6 +36,7 @@ use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
use datatypes::data_type::DataType;
use datatypes::schema::SchemaRef;
+use event::{LogState, LogValidatorRef};
use futures::FutureExt;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
@@ -91,6 +92,7 @@ pub mod csv_result;
#[cfg(feature = "dashboard")]
mod dashboard;
pub mod error_result;
+pub mod greptime_manage_resp;
pub mod greptime_result_v1;
pub mod influxdb_result_v1;
pub mod table_result;
@@ -589,11 +591,15 @@ impl HttpServerBuilder {
}
}
- pub fn with_log_ingest_handler(self, handler: LogHandlerRef) -> Self {
+ pub fn with_log_ingest_handler(
+ self,
+ handler: LogHandlerRef,
+ validator: Option<LogValidatorRef>,
+ ) -> Self {
Self {
router: self.router.nest(
&format!("/{HTTP_API_VERSION}/events"),
- HttpServer::route_log(handler),
+ HttpServer::route_log(handler, validator),
),
..self
}
@@ -721,19 +727,29 @@ impl HttpServer {
.with_state(metrics_handler)
}
- fn route_log<S>(log_handler: LogHandlerRef) -> Router<S> {
+ fn route_log<S>(
+ log_handler: LogHandlerRef,
+ log_validator: Option<LogValidatorRef>,
+ ) -> Router<S> {
Router::new()
.route("/logs", routing::post(event::log_ingester))
.route(
"/pipelines/:pipeline_name",
routing::post(event::add_pipeline),
)
+ .route(
+ "/pipelines/:pipeline_name",
+ routing::delete(event::delete_pipeline),
+ )
.layer(
ServiceBuilder::new()
.layer(HandleErrorLayer::new(handle_error))
.layer(RequestDecompressionLayer::new()),
)
- .with_state(log_handler)
+ .with_state(LogState {
+ log_handler,
+ log_validator,
+ })
}
fn route_sql<S>(api_state: ApiState) -> ApiRouter<S> {
diff --git a/src/servers/src/http/event.rs b/src/servers/src/http/event.rs
index 536337233c58..ea436009b004 100644
--- a/src/servers/src/http/event.rs
+++ b/src/servers/src/http/event.rs
@@ -13,6 +13,8 @@
// limitations under the License.
use std::result::Result as StdResult;
+use std::sync::Arc;
+use std::time::Instant;
use api::v1::{RowInsertRequest, RowInsertRequests, Rows};
use axum::body::HttpBody;
@@ -23,22 +25,20 @@ use axum::http::{Request, StatusCode};
use axum::response::{IntoResponse, Response};
use axum::{async_trait, BoxError, Extension, TypedHeader};
use common_telemetry::{error, warn};
-use common_time::{Timestamp, Timezone};
-use datatypes::timestamp::TimestampNanosecond;
-use http::{HeaderMap, HeaderValue};
use mime_guess::mime;
use pipeline::error::{CastTypeSnafu, PipelineTransformSnafu};
-use pipeline::table::PipelineVersion;
-use pipeline::Value as PipelineValue;
+use pipeline::util::to_pipeline_version;
+use pipeline::{PipelineVersion, Value as PipelineValue};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
-use serde_json::{json, Deserializer, Value};
+use serde_json::{Deserializer, Value};
use session::context::QueryContextRef;
-use snafu::{OptionExt, ResultExt};
+use snafu::{ensure, OptionExt, ResultExt};
use crate::error::{
InvalidParameterSnafu, ParseJsonSnafu, PipelineSnafu, Result, UnsupportedContentTypeSnafu,
};
+use crate::http::greptime_manage_resp::GreptimedbManageResponse;
use crate::http::greptime_result_v1::GreptimedbV1Response;
use crate::http::HttpResponse;
use crate::query_handler::LogHandlerRef;
@@ -51,6 +51,7 @@ pub struct LogIngesterQueryParams {
pub ignore_errors: Option<bool>,
pub version: Option<String>,
+ pub source: Option<String>,
}
pub struct PipelineContent(String);
@@ -100,11 +101,13 @@ where
#[axum_macros::debug_handler]
pub async fn add_pipeline(
- State(handler): State<LogHandlerRef>,
+ State(state): State<LogState>,
Path(pipeline_name): Path<String>,
Extension(query_ctx): Extension<QueryContextRef>,
PipelineContent(payload): PipelineContent,
-) -> Result<impl IntoResponse> {
+) -> Result<GreptimedbManageResponse> {
+ let start = Instant::now();
+ let handler = state.log_handler;
if pipeline_name.is_empty() {
return Err(InvalidParameterSnafu {
reason: "pipeline_name is required in path",
@@ -126,22 +129,10 @@ pub async fn add_pipeline(
result
.map(|pipeline| {
- let json_header =
- HeaderValue::from_str(mime_guess::mime::APPLICATION_JSON.as_ref()).unwrap();
- let mut headers = HeaderMap::new();
- headers.append(CONTENT_TYPE, json_header);
- // Safety check: unwrap is safe here because we have checked the format of the timestamp
- let version = pipeline
- .0
- .as_formatted_string(
- "%Y-%m-%d %H:%M:%S%.fZ",
- // Safety check: unwrap is safe here because we have checked the format of the timezone
- Some(Timezone::from_tz_string("UTC").unwrap()).as_ref(),
- )
- .unwrap();
- (
- headers,
- json!({"version": version, "name": pipeline_name}).to_string(),
+ GreptimedbManageResponse::from_pipeline(
+ pipeline_name,
+ pipeline.0.to_timezone_aware_string(None),
+ start.elapsed().as_millis() as u64,
)
})
.map_err(|e| {
@@ -150,6 +141,48 @@ pub async fn add_pipeline(
})
}
+#[axum_macros::debug_handler]
+pub async fn delete_pipeline(
+ State(state): State<LogState>,
+ Extension(query_ctx): Extension<QueryContextRef>,
+ Query(query_params): Query<LogIngesterQueryParams>,
+ Path(pipeline_name): Path<String>,
+) -> Result<GreptimedbManageResponse> {
+ let start = Instant::now();
+ let handler = state.log_handler;
+ ensure!(
+ !pipeline_name.is_empty(),
+ InvalidParameterSnafu {
+ reason: "pipeline_name is required",
+ }
+ );
+
+ let version_str = query_params.version.context(InvalidParameterSnafu {
+ reason: "version is required",
+ })?;
+
+ let version = to_pipeline_version(Some(version_str.clone())).context(PipelineSnafu)?;
+
+ handler
+ .delete_pipeline(&pipeline_name, version, query_ctx)
+ .await
+ .map(|v| {
+ if v.is_some() {
+ GreptimedbManageResponse::from_pipeline(
+ pipeline_name,
+ version_str,
+ start.elapsed().as_millis() as u64,
+ )
+ } else {
+ GreptimedbManageResponse::from_pipelines(vec![], start.elapsed().as_millis() as u64)
+ }
+ })
+ .map_err(|e| {
+ error!(e; "failed to delete pipeline");
+ e
+ })
+}
+
/// Transform NDJSON array into a single array
fn transform_ndjson_array_factory(
values: impl IntoIterator<Item = StdResult<Value, serde_json::Error>>,
@@ -192,12 +225,20 @@ fn transform_ndjson_array_factory(
#[axum_macros::debug_handler]
pub async fn log_ingester(
- State(handler): State<LogHandlerRef>,
+ State(log_state): State<LogState>,
Query(query_params): Query<LogIngesterQueryParams>,
Extension(query_ctx): Extension<QueryContextRef>,
TypedHeader(content_type): TypedHeader<ContentType>,
payload: String,
) -> Result<HttpResponse> {
+ if let Some(log_validator) = log_state.log_validator {
+ if let Some(response) = log_validator.validate(query_params.source.clone(), &payload) {
+ return response;
+ }
+ }
+
+ let handler = log_state.log_handler;
+
let pipeline_name = query_params.pipeline_name.context(InvalidParameterSnafu {
reason: "pipeline_name is required",
})?;
@@ -205,18 +246,7 @@ pub async fn log_ingester(
reason: "table is required",
})?;
- let version = match query_params.version {
- Some(version) => {
- let ts = Timestamp::from_str_utc(&version).map_err(|e| {
- InvalidParameterSnafu {
- reason: format!("invalid pipeline version: {} with error: {}", &version, e),
- }
- .build()
- })?;
- Some(TimestampNanosecond(ts))
- }
- None => None,
- };
+ let version = to_pipeline_version(query_params.version).context(PipelineSnafu)?;
let ignore_errors = query_params.ignore_errors.unwrap_or(false);
@@ -276,3 +306,18 @@ async fn ingest_logs_inner(
.with_execution_time(start.elapsed().as_millis() as u64);
Ok(response)
}
+
+pub trait LogValidator {
+ /// Validate the payload, optionally keyed by its source, before it is processed.
+ /// Return `Some` to short-circuit ingestion with the given response, or `None` to continue.
+ fn validate(&self, source: Option<String>, payload: &str) -> Option<Result<HttpResponse>>;
+}
+
+pub type LogValidatorRef = Arc<dyn LogValidator + Send + Sync>;
+
+/// axum state struct to hold log handler and validator
+#[derive(Clone)]
+pub struct LogState {
+ pub log_handler: LogHandlerRef,
+ pub log_validator: Option<LogValidatorRef>,
+}
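
LogState makes the validator an optional hook that runs before any pipeline work. A minimal sketch of implementing and wiring one, assuming the module paths are exported as used above (the AcceptAll type is invented for illustration):

    use std::sync::Arc;

    use servers::http::event::{LogState, LogValidator, LogValidatorRef};
    use servers::http::HttpResponse;
    use servers::query_handler::LogHandlerRef;

    // Illustration only: inspects nothing and never rejects; a real validator
    // returns Some(response) to short-circuit ingestion.
    struct AcceptAll;

    impl LogValidator for AcceptAll {
        fn validate(
            &self,
            _source: Option<String>,
            _payload: &str,
        ) -> Option<servers::error::Result<HttpResponse>> {
            None
        }
    }

    // Building the state handed to route_log.
    fn log_state(handler: LogHandlerRef) -> LogState {
        let validator: LogValidatorRef = Arc::new(AcceptAll);
        LogState {
            log_handler: handler,
            log_validator: Some(validator),
        }
    }
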
diff --git a/src/servers/src/http/greptime_manage_resp.rs b/src/servers/src/http/greptime_manage_resp.rs
new file mode 100644
index 000000000000..d2f61715b5e3
--- /dev/null
+++ b/src/servers/src/http/greptime_manage_resp.rs
@@ -0,0 +1,136 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use axum::response::IntoResponse;
+use axum::Json;
+use http::header::CONTENT_TYPE;
+use http::HeaderValue;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+
+use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT};
+
+/// Greptimedb Manage Api Response struct
+/// Currently we have `Pipelines` and `Scripts` as control panel api
+#[derive(Serialize, Deserialize, Debug, JsonSchema)]
+pub struct GreptimedbManageResponse {
+ #[serde(flatten)]
+ pub(crate) manage_result: ManageResult,
+ pub(crate) execution_time_ms: u64,
+}
+
+impl GreptimedbManageResponse {
+ pub fn from_pipeline(name: String, version: String, execution_time_ms: u64) -> Self {
+ GreptimedbManageResponse {
+ manage_result: ManageResult::Pipelines {
+ pipelines: vec![PipelineOutput { name, version }],
+ },
+ execution_time_ms,
+ }
+ }
+
+ pub fn from_pipelines(pipelines: Vec<PipelineOutput>, execution_time_ms: u64) -> Self {
+ GreptimedbManageResponse {
+ manage_result: ManageResult::Pipelines { pipelines },
+ execution_time_ms,
+ }
+ }
+
+ pub fn with_execution_time(mut self, execution_time: u64) -> Self {
+ self.execution_time_ms = execution_time;
+ self
+ }
+
+ pub fn execution_time_ms(&self) -> u64 {
+ self.execution_time_ms
+ }
+}
+
+#[derive(Serialize, Deserialize, Debug, JsonSchema)]
+#[serde(untagged)]
+pub enum ManageResult {
+ Pipelines { pipelines: Vec<PipelineOutput> },
+ // todo(shuiyisong): refactor scripts api
+ Scripts(),
+}
+
+#[derive(Serialize, Deserialize, Debug, JsonSchema)]
+pub struct PipelineOutput {
+ name: String,
+ version: String,
+}
+
+impl IntoResponse for GreptimedbManageResponse {
+ fn into_response(self) -> axum::response::Response {
+ let execution_time = self.execution_time_ms;
+
+ let mut resp = Json(self).into_response();
+
+ // We deliberately don't add this format into [`crate::http::ResponseFormat`]
+ // because this format belongs to the manage API rather than the data query API
+ resp.headers_mut().insert(
+ &GREPTIME_DB_HEADER_FORMAT,
+ HeaderValue::from_static("greptimedb_manage"),
+ );
+ resp.headers_mut().insert(
+ &GREPTIME_DB_HEADER_EXECUTION_TIME,
+ HeaderValue::from(execution_time),
+ );
+ resp.headers_mut().insert(
+ CONTENT_TYPE,
+ HeaderValue::from_str(mime_guess::mime::APPLICATION_JSON.as_ref()).unwrap(),
+ );
+
+ resp
+ }
+}
+
+#[cfg(test)]
+mod tests {
+
+ use arrow::datatypes::ToByteSlice;
+ use http_body::Body;
+ use hyper::body::to_bytes;
+
+ use super::*;
+
+ #[tokio::test]
+ async fn test_into_response() {
+ let resp = GreptimedbManageResponse {
+ manage_result: ManageResult::Pipelines {
+ pipelines: vec![PipelineOutput {
+ name: "test_name".to_string(),
+ version: "test_version".to_string(),
+ }],
+ },
+ execution_time_ms: 42,
+ };
+
+ let mut re = resp.into_response();
+ let data = re.data();
+
+ let data_str = format!("{:?}", data);
+ assert_eq!(
+ data_str,
+ r#"Data(Response { status: 200, version: HTTP/1.1, headers: {"content-type": "application/json", "x-greptime-format": "greptimedb_manage", "x-greptime-execution-time": "42"}, body: UnsyncBoxBody })"#
+ );
+
+ let body_bytes = to_bytes(re.into_body()).await.unwrap();
+ let body_str = String::from_utf8_lossy(body_bytes.to_byte_slice());
+ assert_eq!(
+ body_str,
+ r#"{"pipelines":[{"name":"test_name","version":"test_version"}],"execution_time_ms":42}"#
+ );
+ }
+}
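
Because ManageResult is flattened and untagged, the pipeline list and execution_time_ms end up at the same JSON level, which is exactly what the test above asserts. A small sketch of producing that body directly (the pipeline name and timing are made up):

    use servers::http::greptime_manage_resp::GreptimedbManageResponse;

    // Illustration only:
    // => {"pipelines":[{"name":"nginx","version":"2024-07-04 08:31:00.987136"}],"execution_time_ms":7}
    fn example_body() -> String {
        let resp = GreptimedbManageResponse::from_pipeline(
            "nginx".to_string(),
            "2024-07-04 08:31:00.987136".to_string(),
            7,
        );
        serde_json::to_string(&resp).unwrap()
    }
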
diff --git a/src/servers/src/query_handler.rs b/src/servers/src/query_handler.rs
index cdd628b4bb87..1fe64e652265 100644
--- a/src/servers/src/query_handler.rs
+++ b/src/servers/src/query_handler.rs
@@ -35,8 +35,7 @@ use common_query::Output;
use headers::HeaderValue;
use opentelemetry_proto::tonic::collector::metrics::v1::ExportMetricsServiceRequest;
use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest;
-use pipeline::table::{PipelineInfo, PipelineVersion};
-use pipeline::{GreptimeTransformer, Pipeline};
+use pipeline::{GreptimeTransformer, Pipeline, PipelineInfo, PipelineVersion};
use serde_json::Value;
use session::context::QueryContextRef;
@@ -145,5 +144,10 @@ pub trait LogHandler {
query_ctx: QueryContextRef,
) -> Result<PipelineInfo>;
- async fn delete_pipeline(&self, name: &str, query_ctx: QueryContextRef) -> Result<()>;
+ async fn delete_pipeline(
+ &self,
+ name: &str,
+ version: PipelineVersion,
+ query_ctx: QueryContextRef,
+ ) -> Result<Option<()>>;
}
diff --git a/tests-integration/Cargo.toml b/tests-integration/Cargo.toml
index 887f04a3b218..905b03bdd1b7 100644
--- a/tests-integration/Cargo.toml
+++ b/tests-integration/Cargo.toml
@@ -90,3 +90,4 @@ script.workspace = true
session = { workspace = true, features = ["testing"] }
store-api.workspace = true
tokio-postgres = "0.7"
+url = "2.3"
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index 04054b524d06..76f523071ba2 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -426,6 +426,7 @@ pub async fn setup_test_http_app_with_frontend_and_user_provider(
ServerSqlQueryHandlerAdapter::arc(instance.instance.clone()),
Some(instance.instance.clone()),
)
+ .with_log_ingest_handler(instance.instance.clone(), None)
.with_greptime_config_options(instance.opts.to_toml().unwrap());
if let Some(user_provider) = user_provider {
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index c9c846807804..57ad46dfe018 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -19,7 +19,7 @@ use auth::user_provider_from_option;
use axum::http::{HeaderName, StatusCode};
use common_error::status_code::StatusCode as ErrorCode;
use prost::Message;
-use serde_json::json;
+use serde_json::{json, Value};
use servers::http::error_result::ErrorResponse;
use servers::http::greptime_result_v1::GreptimedbV1Response;
use servers::http::handler::HealthResponse;
@@ -76,6 +76,8 @@ macro_rules! http_tests {
test_dashboard_path,
test_prometheus_remote_write,
test_vm_proto_remote_write,
+
+ test_pipeline_api,
);
)*
};
@@ -1000,3 +1002,119 @@ pub async fn test_vm_proto_remote_write(store_type: StorageType) {
guard.remove_all().await;
}
+
+pub async fn test_pipeline_api(store_type: StorageType) {
+ common_telemetry::init_default_ut_logging();
+ let (app, mut guard) = setup_test_http_app_with_frontend(store_type, "test_pipeline_api").await;
+
+ // handshake
+ let client = TestClient::new(app);
+
+ let body = r#"
+processors:
+ - date:
+ field: time
+ formats:
+ - "%Y-%m-%d %H:%M:%S%.3f"
+ ignore_missing: true
+
+transform:
+ - fields:
+ - id1
+ - id2
+ type: int32
+ - fields:
+ - type
+ - log
+ - logger
+ type: string
+ - field: time
+ type: time
+ index: timestamp
+"#;
+
+ // 1. create pipeline
+ let res = client
+ .post("/v1/events/pipelines/test")
+ .header("Content-Type", "application/x-yaml")
+ .body(body)
+ .send()
+ .await;
+
+ assert_eq!(res.status(), StatusCode::OK);
+
+ let content = res.text().await;
+
+ let content = serde_json::from_str(&content);
+ assert!(content.is_ok());
+ // {"execution_time_ms":13,"pipelines":[{"name":"test","version":"2024-07-04 08:31:00.987136"}]}
+ let content: Value = content.unwrap();
+
+ let execution_time = content.get("execution_time_ms");
+ assert!(execution_time.unwrap().is_number());
+ let pipelines = content.get("pipelines");
+ let pipelines = pipelines.unwrap().as_array().unwrap();
+ assert_eq!(pipelines.len(), 1);
+ let pipeline = pipelines.first().unwrap();
+ assert_eq!(pipeline.get("name").unwrap(), "test");
+
+ let version_str = pipeline
+ .get("version")
+ .unwrap()
+ .as_str()
+ .unwrap()
+ .to_string();
+
+ // 2. write data
+ let data_body = r#"
+[
+ {
+ "id1": "2436",
+ "id2": "2528",
+ "logger": "INTERACT.MANAGER",
+ "type": "I",
+ "time": "2024-05-25 20:16:37.217",
+ "log": "ClusterAdapter:enter sendTextDataToCluster\\n"
+ }
+]
+"#;
+ let res = client
+ .post("/v1/events/logs?db=public&table=logs1&pipeline_name=test")
+ .header("Content-Type", "application/json")
+ .body(data_body)
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::OK);
+
+ let encoded: String = url::form_urlencoded::byte_serialize(version_str.as_bytes()).collect();
+
+ // 3. remove pipeline
+ let res = client
+ .delete(format!("/v1/events/pipelines/test?version={}", encoded).as_str())
+ .send()
+ .await;
+
+ assert_eq!(res.status(), StatusCode::OK);
+
+ // {"pipelines":[{"name":"test","version":"2024-07-04 08:55:29.038347"}],"execution_time_ms":22}
+ let content = res.text().await;
+ let content: Value = serde_json::from_str(&content).unwrap();
+ assert!(content.get("execution_time_ms").unwrap().is_number());
+
+ assert_eq!(
+ content.get("pipelines").unwrap().to_string(),
+ format!(r#"[{{"name":"test","version":"{}"}}]"#, version_str).as_str()
+ );
+
+ // 4. write data failed
+ let res = client
+ .post("/v1/events/logs?db=public&table=logs1&pipeline_name=test")
+ .header("Content-Type", "application/json")
+ .body(data_body)
+ .send()
+ .await;
+ // todo(shuiyisong): refactor http error handling
+ assert_ne!(res.status(), StatusCode::OK);
+
+ guard.remove_all().await;
+}
| feat | delete pipeline (#4156) |
| 1565c8d2363eb785500d356aff34c30267b60381 | 2022-11-15 13:28:54 | Ruihang Xia | chore: specify import style in rustfmt (#460) | false |
diff --git a/Cargo.lock b/Cargo.lock
index b1a8f2b8c33b..658cd4529c34 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -119,7 +119,6 @@ name = "api"
version = "0.1.0"
dependencies = [
"common-base",
- "common-error",
"common-time",
"datatypes",
"prost 0.11.0",
@@ -277,36 +276,6 @@ dependencies = [
"tokio",
]
-[[package]]
-name = "async-io"
-version = "1.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8121296a9f05be7f34aa4196b1747243b3b62e048bb7906f644f3fbfc490cf7"
-dependencies = [
- "async-lock",
- "autocfg",
- "concurrent-queue",
- "futures-lite",
- "libc",
- "log",
- "parking",
- "polling",
- "slab",
- "socket2",
- "waker-fn",
- "winapi",
-]
-
-[[package]]
-name = "async-lock"
-version = "2.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685"
-dependencies = [
- "event-listener",
- "futures-lite",
-]
-
[[package]]
name = "async-stream"
version = "0.3.3"
@@ -688,12 +657,6 @@ version = "3.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d"
-[[package]]
-name = "bytecount"
-version = "0.6.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c"
-
[[package]]
name = "bytemuck"
version = "1.12.1"
@@ -735,37 +698,6 @@ version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c"
-[[package]]
-name = "camino"
-version = "1.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "88ad0e1e3e88dd237a156ab9f571021b8a158caa0ae44b1968a241efb5144c1e"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "cargo-platform"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "cargo_metadata"
-version = "0.14.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa"
-dependencies = [
- "camino",
- "cargo-platform",
- "semver",
- "serde",
- "serde_json",
-]
-
[[package]]
name = "caseless"
version = "0.2.1"
@@ -1026,7 +958,6 @@ dependencies = [
"common-base",
"common-error",
"common-grpc",
- "common-insert",
"common-query",
"common-recordbatch",
"common-time",
@@ -1729,7 +1660,6 @@ dependencies = [
"datafusion",
"datafusion-common",
"datatypes",
- "frontend",
"futures",
"hyper",
"log-store",
@@ -1948,15 +1878,6 @@ dependencies = [
"libc",
]
-[[package]]
-name = "error-chain"
-version = "0.12.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc"
-dependencies = [
- "version_check",
-]
-
[[package]]
name = "error-code"
version = "2.3.1"
@@ -2094,13 +2015,11 @@ dependencies = [
"async-stream",
"async-trait",
"catalog",
- "chrono",
"client",
"common-base",
"common-catalog",
"common-error",
"common-grpc",
- "common-insert",
"common-query",
"common-recordbatch",
"common-runtime",
@@ -2112,11 +2031,8 @@ dependencies = [
"datanode",
"datatypes",
"futures",
- "futures-util",
"itertools",
"meta-client",
- "meta-srv",
- "moka",
"openmetrics-parser",
"prost 0.11.0",
"query",
@@ -2258,21 +2174,6 @@ version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbf4d2a7a308fd4578637c0b17c7e1c7ba127b8f6ba00b29f717e9655d85eb68"
-[[package]]
-name = "futures-lite"
-version = "1.12.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48"
-dependencies = [
- "fastrand",
- "futures-core",
- "futures-io",
- "memchr",
- "parking",
- "pin-project-lite",
- "waker-fn",
-]
-
[[package]]
name = "futures-macro"
version = "0.3.24"
@@ -3044,7 +2945,6 @@ dependencies = [
"futures",
"meta-srv",
"rand 0.8.5",
- "serde",
"snafu",
"tokio",
"tokio-stream",
@@ -3184,32 +3084,6 @@ dependencies = [
"windows-sys",
]
-[[package]]
-name = "moka"
-version = "0.9.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b49a05f67020456541f4f29cbaa812016a266a86ec76f96d3873d459c68fe5e"
-dependencies = [
- "async-io",
- "async-lock",
- "crossbeam-channel",
- "crossbeam-epoch",
- "crossbeam-utils",
- "futures-util",
- "num_cpus",
- "once_cell",
- "parking_lot",
- "quanta",
- "rustc_version",
- "scheduled-thread-pool",
- "skeptic",
- "smallvec",
- "tagptr",
- "thiserror",
- "triomphe",
- "uuid",
-]
-
[[package]]
name = "multimap"
version = "0.8.3"
@@ -3684,12 +3558,6 @@ version = "6.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff"
-[[package]]
-name = "parking"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72"
-
[[package]]
name = "parking_lot"
version = "0.12.1"
@@ -3870,9 +3738,9 @@ dependencies = [
[[package]]
name = "pgwire"
-version = "0.5.0"
+version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5dacbf864d6cb6a0e676c9a1162ab7b315b5c8e6c87fa9b6e0ba9ba0a569adb1"
+checksum = "9e30e99a0b8acf60a6815aa8178e9ffb08178ef3ca1366673bb0d6c7ababe4c2"
dependencies = [
"async-trait",
"bytes",
@@ -3885,7 +3753,6 @@ dependencies = [
"thiserror",
"time 0.3.14",
"tokio",
- "tokio-rustls",
"tokio-util",
]
@@ -4052,20 +3919,6 @@ dependencies = [
"syn",
]
-[[package]]
-name = "polling"
-version = "2.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab4609a838d88b73d8238967b60dd115cc08d38e2bbaf51ee1e4b695f89122e2"
-dependencies = [
- "autocfg",
- "cfg-if",
- "libc",
- "log",
- "wepoll-ffi",
- "winapi",
-]
-
[[package]]
name = "portable-atomic"
version = "0.3.15"
@@ -4273,17 +4126,6 @@ dependencies = [
"prost 0.11.0",
]
-[[package]]
-name = "pulldown-cmark"
-version = "0.9.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d9cc634bc78768157b5cbfe988ffcd1dcba95cd2b2f03a88316c08c6d00ed63"
-dependencies = [
- "bitflags",
- "memchr",
- "unicase",
-]
-
[[package]]
name = "quanta"
version = "0.10.1"
@@ -4887,10 +4729,6 @@ dependencies = [
name = "rustpython-pylib"
version = "0.1.0"
source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3"
-dependencies = [
- "rustpython-bytecode",
- "rustpython-derive",
-]
[[package]]
name = "rustpython-vm"
@@ -5027,15 +4865,6 @@ dependencies = [
"windows-sys",
]
-[[package]]
-name = "scheduled-thread-pool"
-version = "0.2.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf"
-dependencies = [
- "parking_lot",
-]
-
[[package]]
name = "schemars"
version = "0.8.11"
@@ -5147,9 +4976,6 @@ name = "semver"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4"
-dependencies = [
- "serde",
-]
[[package]]
name = "serde"
@@ -5363,21 +5189,6 @@ version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de"
-[[package]]
-name = "skeptic"
-version = "0.13.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8"
-dependencies = [
- "bytecount",
- "cargo_metadata",
- "error-chain",
- "glob",
- "pulldown-cmark",
- "tempfile",
- "walkdir",
-]
-
[[package]]
name = "sketches-ddsketch"
version = "0.2.0"
@@ -5459,7 +5270,6 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
name = "sql"
version = "0.1.0"
dependencies = [
- "api",
"catalog",
"common-catalog",
"common-error",
@@ -5805,12 +5615,6 @@ dependencies = [
"tokio",
]
-[[package]]
-name = "tagptr"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417"
-
[[package]]
name = "tap"
version = "1.0.1"
@@ -6384,12 +6188,6 @@ dependencies = [
"tracing-log",
]
-[[package]]
-name = "triomphe"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1ee9bd9239c339d714d657fac840c6d2a4f9c45f4f9ec7b0975113458be78db"
-
[[package]]
name = "try-lock"
version = "0.2.3"
@@ -6646,12 +6444,6 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8e76fae08f03f96e166d2dfda232190638c10e0383841252416f9cfe2ae60e6"
-[[package]]
-name = "waker-fn"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca"
-
[[package]]
name = "walkdir"
version = "2.3.2"
@@ -6780,15 +6572,6 @@ dependencies = [
"webpki",
]
-[[package]]
-name = "wepoll-ffi"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb"
-dependencies = [
- "cc",
-]
-
[[package]]
name = "which"
version = "4.3.0"
diff --git a/benchmarks/src/bin/nyc-taxi.rs b/benchmarks/src/bin/nyc-taxi.rs
index bde094183789..caac625ef4b4 100644
--- a/benchmarks/src/bin/nyc-taxi.rs
+++ b/benchmarks/src/bin/nyc-taxi.rs
@@ -4,32 +4,24 @@
#![feature(once_cell)]
#![allow(clippy::print_stdout)]
-use std::{
- collections::HashMap,
- path::{Path, PathBuf},
- sync::Arc,
- time::Instant,
-};
-
-use arrow::{
- array::{ArrayRef, PrimitiveArray, StringArray, TimestampNanosecondArray},
- datatypes::{DataType, Float64Type, Int64Type},
- record_batch::RecordBatch,
-};
+use std::collections::HashMap;
+use std::path::{Path, PathBuf};
+use std::sync::Arc;
+use std::time::Instant;
+
+use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampNanosecondArray};
+use arrow::datatypes::{DataType, Float64Type, Int64Type};
+use arrow::record_batch::RecordBatch;
use clap::Parser;
-use client::{
- admin::Admin,
- api::v1::{
- codec::InsertBatch, column::Values, insert_expr, Column, ColumnDataType, ColumnDef,
- CreateExpr, InsertExpr,
- },
- Client, Database, Select,
-};
+use client::admin::Admin;
+use client::api::v1::codec::InsertBatch;
+use client::api::v1::column::Values;
+use client::api::v1::{insert_expr, Column, ColumnDataType, ColumnDef, CreateExpr, InsertExpr};
+use client::{Client, Database, Select};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
-use parquet::{
- arrow::{ArrowReader, ParquetFileArrowReader},
- file::{reader::FileReader, serialized_reader::SerializedFileReader},
-};
+use parquet::arrow::{ArrowReader, ParquetFileArrowReader};
+use parquet::file::reader::FileReader;
+use parquet::file::serialized_reader::SerializedFileReader;
use tokio::task::JoinSet;
const DATABASE_NAME: &str = "greptime";
diff --git a/rustfmt.toml b/rustfmt.toml
index f3dae047e27f..64d94def2665 100644
--- a/rustfmt.toml
+++ b/rustfmt.toml
@@ -1,3 +1,2 @@
group_imports = "StdExternalCrate"
-
-
+imports_granularity = "Module"
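
Together with the existing group_imports = "StdExternalCrate", this setting makes rustfmt keep one use statement per module, which is what every file-level change below amounts to. Schematically (the import lists are taken from the diffs that follow):

    // Before (per-item and nested imports):
    //   use std::sync::Arc;
    //   use std::sync::RwLock;
    //   use datafusion::physical_plan::{expressions::Column, projection::ProjectionExec, ExecutionPlan};
    //
    // After `cargo fmt` with imports_granularity = "Module":
    use std::sync::{Arc, RwLock};

    use datafusion::physical_plan::expressions::Column;
    use datafusion::physical_plan::projection::ProjectionExec;
    use datafusion::physical_plan::ExecutionPlan;
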
diff --git a/src/api/src/helper.rs b/src/api/src/helper.rs
index 7c1a4a9b6cf4..7958eb2c1e89 100644
--- a/src/api/src/helper.rs
+++ b/src/api/src/helper.rs
@@ -7,8 +7,7 @@ use snafu::prelude::*;
use crate::error::{self, Result};
use crate::v1::column::Values;
-use crate::v1::Column;
-use crate::v1::ColumnDataType;
+use crate::v1::{Column, ColumnDataType};
#[derive(Debug, PartialEq, Eq)]
pub struct ColumnDataTypeWrapper(ColumnDataType);
diff --git a/src/api/src/result.rs b/src/api/src/result.rs
index 34b65470d702..725ccce68c7f 100644
--- a/src/api/src/result.rs
+++ b/src/api/src/result.rs
@@ -1,8 +1,9 @@
use common_error::prelude::ErrorExt;
+use crate::v1::codec::SelectResult;
use crate::v1::{
- admin_result, codec::SelectResult, object_result, AdminResult, MutateResult, ObjectResult,
- ResultHeader, SelectResult as SelectResultRaw,
+ admin_result, object_result, AdminResult, MutateResult, ObjectResult, ResultHeader,
+ SelectResult as SelectResultRaw,
};
pub const PROTOCOL_VERSION: u32 = 1;
diff --git a/src/api/src/serde.rs b/src/api/src/serde.rs
index d97ed88dffbf..d485c7cef83e 100644
--- a/src/api/src/serde.rs
+++ b/src/api/src/serde.rs
@@ -1,10 +1,7 @@
pub use prost::DecodeError;
use prost::Message;
-use crate::v1::codec::InsertBatch;
-use crate::v1::codec::PhysicalPlanNode;
-use crate::v1::codec::RegionNumber;
-use crate::v1::codec::SelectResult;
+use crate::v1::codec::{InsertBatch, PhysicalPlanNode, RegionNumber, SelectResult};
use crate::v1::meta::TableRouteValue;
macro_rules! impl_convert_with_bytes {
@@ -36,8 +33,7 @@ mod tests {
use std::ops::Deref;
use crate::v1::codec::*;
- use crate::v1::column;
- use crate::v1::Column;
+ use crate::v1::{column, Column};
const SEMANTIC_TAG: i32 = 0;
diff --git a/src/api/src/v1/meta.rs b/src/api/src/v1/meta.rs
index 087c00be8808..fd74246a4d7a 100644
--- a/src/api/src/v1/meta.rs
+++ b/src/api/src/v1/meta.rs
@@ -1,8 +1,7 @@
tonic::include_proto!("greptime.v1.meta");
use std::collections::HashMap;
-use std::hash::Hash;
-use std::hash::Hasher;
+use std::hash::{Hash, Hasher};
pub const PROTOCOL_VERSION: u64 = 1;
diff --git a/src/catalog/src/local/manager.rs b/src/catalog/src/local/manager.rs
index 6179b5530e30..f2f09d81aab3 100644
--- a/src/catalog/src/local/manager.rs
+++ b/src/catalog/src/local/manager.rs
@@ -20,10 +20,10 @@ use table::table::TableIdProvider;
use table::TableRef;
use crate::error::{
- CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, SchemaNotFoundSnafu,
- SystemCatalogSnafu, SystemCatalogTypeMismatchSnafu, TableExistsSnafu, TableNotFoundSnafu,
+ CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu, Result,
+ SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu, SystemCatalogTypeMismatchSnafu,
+ TableExistsSnafu, TableNotFoundSnafu,
};
-use crate::error::{ReadSystemCatalogSnafu, Result, SchemaExistsSnafu};
use crate::local::memory::{MemoryCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
use crate::system::{
decode_system_catalog, Entry, SystemCatalogTable, TableEntry, ENTRY_TYPE_INDEX, KEY_INDEX,
diff --git a/src/catalog/src/local/memory.rs b/src/catalog/src/local/memory.rs
index 8c98ac0fbfca..65cd03506baf 100644
--- a/src/catalog/src/local/memory.rs
+++ b/src/catalog/src/local/memory.rs
@@ -2,8 +2,7 @@ use std::any::Any;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};
-use std::sync::Arc;
-use std::sync::RwLock;
+use std::sync::{Arc, RwLock};
use common_catalog::consts::MIN_USER_TABLE_ID;
use snafu::OptionExt;
diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs
index e397b607c75a..7d0ce17ec58a 100644
--- a/src/catalog/src/remote/manager.rs
+++ b/src/catalog/src/remote/manager.rs
@@ -22,10 +22,9 @@ use table::TableRef;
use tokio::sync::Mutex;
use crate::error::{
- CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, OpenTableSnafu,
- SchemaNotFoundSnafu, TableExistsSnafu,
+ CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, InvalidTableSchemaSnafu,
+ OpenTableSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu,
};
-use crate::error::{InvalidTableSchemaSnafu, Result};
use crate::remote::{Kv, KvBackendRef};
use crate::{
handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index dff4c644c21a..3175d9b0a7bd 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -2,14 +2,12 @@ use std::any::Any;
use std::collections::HashMap;
use std::sync::Arc;
-use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::consts::{
- INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_ID,
- SYSTEM_CATALOG_TABLE_NAME,
+ DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_NAME,
+ SYSTEM_CATALOG_TABLE_ID, SYSTEM_CATALOG_TABLE_NAME,
};
use common_query::logical_plan::Expr;
-use common_query::physical_plan::PhysicalPlanRef;
-use common_query::physical_plan::RuntimeEnv;
+use common_query::physical_plan::{PhysicalPlanRef, RuntimeEnv};
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
use common_time::timestamp::Timestamp;
diff --git a/src/client/examples/insert.rs b/src/client/examples/insert.rs
index 13850ebc11ac..9e813e3759c2 100644
--- a/src/client/examples/insert.rs
+++ b/src/client/examples/insert.rs
@@ -1,6 +1,7 @@
use std::collections::HashMap;
-use api::v1::{codec::InsertBatch, *};
+use api::v1::codec::InsertBatch;
+use api::v1::*;
use client::{Client, Database};
fn main() {
tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
diff --git a/src/client/examples/logical.rs b/src/client/examples/logical.rs
index 843cb0f8a8a3..cd105ded5a8c 100644
--- a/src/client/examples/logical.rs
+++ b/src/client/examples/logical.rs
@@ -1,12 +1,11 @@
use api::v1::{ColumnDataType, ColumnDef, CreateExpr};
-use client::{admin::Admin, Client, Database};
+use client::admin::Admin;
+use client::{Client, Database};
use prost_09::Message;
-use substrait_proto::protobuf::{
- plan_rel::RelType as PlanRelType,
- read_rel::{NamedTable, ReadType},
- rel::RelType,
- PlanRel, ReadRel, Rel,
-};
+use substrait_proto::protobuf::plan_rel::RelType as PlanRelType;
+use substrait_proto::protobuf::read_rel::{NamedTable, ReadType};
+use substrait_proto::protobuf::rel::RelType;
+use substrait_proto::protobuf::{PlanRel, ReadRel, Rel};
use tracing::{event, Level};
fn main() {
diff --git a/src/client/examples/physical.rs b/src/client/examples/physical.rs
index 1e866fd80fbc..a6263487dc15 100644
--- a/src/client/examples/physical.rs
+++ b/src/client/examples/physical.rs
@@ -2,9 +2,9 @@ use std::sync::Arc;
use client::{Client, Database};
use common_grpc::MockExecution;
-use datafusion::physical_plan::{
- expressions::Column, projection::ProjectionExec, ExecutionPlan, PhysicalExpr,
-};
+use datafusion::physical_plan::expressions::Column;
+use datafusion::physical_plan::projection::ProjectionExec;
+use datafusion::physical_plan::{ExecutionPlan, PhysicalExpr};
use tracing::{event, Level};
fn main() {
diff --git a/src/client/src/admin.rs b/src/client/src/admin.rs
index a73d546d85fe..d0784058a04d 100644
--- a/src/client/src/admin.rs
+++ b/src/client/src/admin.rs
@@ -4,9 +4,7 @@ use common_query::Output;
use snafu::prelude::*;
use crate::database::PROTOCOL_VERSION;
-use crate::error;
-use crate::Client;
-use crate::Result;
+use crate::{error, Client, Result};
#[derive(Clone, Debug)]
pub struct Admin {
diff --git a/src/client/src/client.rs b/src/client/src/client.rs
index 8913f6139c3d..a43334bb455e 100644
--- a/src/client/src/client.rs
+++ b/src/client/src/client.rs
@@ -4,14 +4,11 @@ use api::v1::greptime_client::GreptimeClient;
use api::v1::*;
use common_grpc::channel_manager::ChannelManager;
use parking_lot::RwLock;
-use snafu::OptionExt;
-use snafu::ResultExt;
+use snafu::{OptionExt, ResultExt};
use tonic::transport::Channel;
-use crate::error;
-use crate::load_balance::LoadBalance;
-use crate::load_balance::Loadbalancer;
-use crate::Result;
+use crate::load_balance::{LoadBalance, Loadbalancer};
+use crate::{error, Result};
#[derive(Clone, Debug, Default)]
pub struct Client {
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index 2a49babbc23d..d8c4fc913cd9 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -7,8 +7,7 @@ use api::v1::{
SelectExpr,
};
use common_error::status_code::StatusCode;
-use common_grpc::AsExcutionPlan;
-use common_grpc::DefaultAsPlanImpl;
+use common_grpc::{AsExcutionPlan, DefaultAsPlanImpl};
use common_insert::column_to_vector;
use common_query::Output;
use common_recordbatch::{RecordBatch, RecordBatches};
@@ -17,12 +16,10 @@ use datatypes::prelude::*;
use datatypes::schema::{ColumnSchema, Schema};
use snafu::{ensure, OptionExt, ResultExt};
-use crate::error;
-use crate::error::ColumnToVectorSnafu;
-use crate::{
- error::{ConvertSchemaSnafu, DatanodeSnafu, DecodeSelectSnafu, EncodePhysicalSnafu},
- Client, Result,
+use crate::error::{
+ ColumnToVectorSnafu, ConvertSchemaSnafu, DatanodeSnafu, DecodeSelectSnafu, EncodePhysicalSnafu,
};
+use crate::{error, Client, Result};
pub const PROTOCOL_VERSION: u32 = 1;
diff --git a/src/client/src/lib.rs b/src/client/src/lib.rs
index c3d02a3c6bd1..3ce630a301a4 100644
--- a/src/client/src/lib.rs
+++ b/src/client/src/lib.rs
@@ -6,8 +6,6 @@ pub mod load_balance;
pub use api;
-pub use self::{
- client::Client,
- database::{Database, ObjectResult, Select},
- error::{Error, Result},
-};
+pub use self::client::Client;
+pub use self::database::{Database, ObjectResult, Select};
+pub use self::error::{Error, Result};
diff --git a/src/cmd/src/bin/greptime.rs b/src/cmd/src/bin/greptime.rs
index 8615bcd6e45a..b173af1d83d4 100644
--- a/src/cmd/src/bin/greptime.rs
+++ b/src/cmd/src/bin/greptime.rs
@@ -2,11 +2,8 @@ use std::fmt;
use clap::Parser;
use cmd::error::Result;
-use cmd::frontend;
-use cmd::metasrv;
-use cmd::{datanode, standalone};
-use common_telemetry::logging::error;
-use common_telemetry::logging::info;
+use cmd::{datanode, frontend, metasrv, standalone};
+use common_telemetry::logging::{error, info};
#[derive(Parser)]
#[clap(name = "greptimedb")]
diff --git a/src/cmd/src/metasrv.rs b/src/cmd/src/metasrv.rs
index 0e98b5419aac..9c9f3b62b5ae 100644
--- a/src/cmd/src/metasrv.rs
+++ b/src/cmd/src/metasrv.rs
@@ -4,10 +4,8 @@ use meta_srv::bootstrap;
use meta_srv::metasrv::MetaSrvOptions;
use snafu::ResultExt;
-use crate::error;
-use crate::error::Error;
-use crate::error::Result;
-use crate::toml_loader;
+use crate::error::{Error, Result};
+use crate::{error, toml_loader};
#[derive(Parser)]
pub struct Command {
diff --git a/src/common/error/src/format.rs b/src/common/error/src/format.rs
index b79c85bb2cd6..74e3c6bd5d70 100644
--- a/src/common/error/src/format.rs
+++ b/src/common/error/src/format.rs
@@ -32,7 +32,8 @@ impl<'a, E: ErrorExt + ?Sized> fmt::Debug for DebugFormat<'a, E> {
mod tests {
use std::any::Any;
- use snafu::{prelude::*, Backtrace, GenerateImplicitData};
+ use snafu::prelude::*;
+ use snafu::{Backtrace, GenerateImplicitData};
use super::*;
diff --git a/src/common/error/src/lib.rs b/src/common/error/src/lib.rs
index 7401f5163436..f65f8190a52d 100644
--- a/src/common/error/src/lib.rs
+++ b/src/common/error/src/lib.rs
@@ -4,7 +4,8 @@ pub mod mock;
pub mod status_code;
pub mod prelude {
- pub use snafu::{prelude::*, Backtrace, ErrorCompat};
+ pub use snafu::prelude::*;
+ pub use snafu::{Backtrace, ErrorCompat};
pub use crate::ext::{BoxedError, ErrorExt};
pub use crate::format::DebugFormat;
diff --git a/src/common/function-macro/tests/test_derive.rs b/src/common/function-macro/tests/test_derive.rs
index 58199aa24c99..08c57c24032b 100644
--- a/src/common/function-macro/tests/test_derive.rs
+++ b/src/common/function-macro/tests/test_derive.rs
@@ -1,5 +1,4 @@
-use common_function_macro::as_aggr_func_creator;
-use common_function_macro::AggrFuncTypeStore;
+use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use static_assertions::{assert_fields, assert_impl_all};
#[as_aggr_func_creator]
diff --git a/src/common/function/src/scalars/aggregate/argmax.rs b/src/common/function/src/scalars/aggregate/argmax.rs
index 390591c676cb..dd5e2d275ffc 100644
--- a/src/common/function/src/scalars/aggregate/argmax.rs
+++ b/src/common/function/src/scalars/aggregate/argmax.rs
@@ -5,8 +5,9 @@ use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{BadAccumulatorImplSnafu, CreateAccumulatorSnafu, Result};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
+use datatypes::prelude::*;
use datatypes::vectors::ConstantVector;
-use datatypes::{prelude::*, with_match_primitive_type_id};
+use datatypes::with_match_primitive_type_id;
use snafu::ensure;
// https://numpy.org/doc/stable/reference/generated/numpy.argmax.html
diff --git a/src/common/function/src/scalars/aggregate/argmin.rs b/src/common/function/src/scalars/aggregate/argmin.rs
index a3d8457ce8cd..61ccecc3cc87 100644
--- a/src/common/function/src/scalars/aggregate/argmin.rs
+++ b/src/common/function/src/scalars/aggregate/argmin.rs
@@ -5,8 +5,9 @@ use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{BadAccumulatorImplSnafu, CreateAccumulatorSnafu, Result};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
+use datatypes::prelude::*;
use datatypes::vectors::ConstantVector;
-use datatypes::{prelude::*, with_match_primitive_type_id};
+use datatypes::with_match_primitive_type_id;
use snafu::ensure;
// https://numpy.org/doc/stable/reference/generated/numpy.argmin.html
diff --git a/src/common/function/src/scalars/aggregate/diff.rs b/src/common/function/src/scalars/aggregate/diff.rs
index e43c66b4e4b0..7fb10a01ce05 100644
--- a/src/common/function/src/scalars/aggregate/diff.rs
+++ b/src/common/function/src/scalars/aggregate/diff.rs
@@ -7,10 +7,11 @@ use common_query::error::{
};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
+use datatypes::prelude::*;
use datatypes::types::PrimitiveType;
use datatypes::value::ListValue;
use datatypes::vectors::{ConstantVector, ListVector};
-use datatypes::{prelude::*, with_match_primitive_type_id};
+use datatypes::with_match_primitive_type_id;
use num_traits::AsPrimitive;
use snafu::{ensure, OptionExt, ResultExt};
diff --git a/src/common/function/src/scalars/aggregate/mean.rs b/src/common/function/src/scalars/aggregate/mean.rs
index f4cf0839aef8..4f37a49d4e7b 100644
--- a/src/common/function/src/scalars/aggregate/mean.rs
+++ b/src/common/function/src/scalars/aggregate/mean.rs
@@ -7,8 +7,9 @@ use common_query::error::{
};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
+use datatypes::prelude::*;
use datatypes::vectors::{ConstantVector, Float64Vector, UInt64Vector};
-use datatypes::{prelude::*, with_match_primitive_type_id};
+use datatypes::with_match_primitive_type_id;
use num_traits::AsPrimitive;
use snafu::{ensure, OptionExt};
diff --git a/src/common/function/src/scalars/aggregate/polyval.rs b/src/common/function/src/scalars/aggregate/polyval.rs
index 5a87c49d8593..aa9f2e35ebde 100644
--- a/src/common/function/src/scalars/aggregate/polyval.rs
+++ b/src/common/function/src/scalars/aggregate/polyval.rs
@@ -8,10 +8,11 @@ use common_query::error::{
};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
+use datatypes::prelude::*;
use datatypes::types::PrimitiveType;
use datatypes::value::ListValue;
use datatypes::vectors::{ConstantVector, Int64Vector, ListVector};
-use datatypes::{prelude::*, with_match_primitive_type_id};
+use datatypes::with_match_primitive_type_id;
use num_traits::AsPrimitive;
use snafu::{ensure, OptionExt, ResultExt};
diff --git a/src/common/function/src/scalars/function_registry.rs b/src/common/function/src/scalars/function_registry.rs
index 7e70018a7d33..a3c9d5d4a34c 100644
--- a/src/common/function/src/scalars/function_registry.rs
+++ b/src/common/function/src/scalars/function_registry.rs
@@ -1,7 +1,6 @@
//! functions registry
use std::collections::HashMap;
-use std::sync::Arc;
-use std::sync::RwLock;
+use std::sync::{Arc, RwLock};
use once_cell::sync::Lazy;
diff --git a/src/common/function/src/scalars/numpy/clip.rs b/src/common/function/src/scalars/numpy/clip.rs
index 5a57fbc7a982..63455c08310f 100644
--- a/src/common/function/src/scalars/numpy/clip.rs
+++ b/src/common/function/src/scalars/numpy/clip.rs
@@ -2,8 +2,7 @@ use std::fmt;
use std::sync::Arc;
use common_query::prelude::{Signature, Volatility};
-use datatypes::data_type::ConcreteDataType;
-use datatypes::data_type::DataType;
+use datatypes::data_type::{ConcreteDataType, DataType};
use datatypes::prelude::{Scalar, VectorRef};
use datatypes::with_match_primitive_type_id;
use num_traits::AsPrimitive;
diff --git a/src/common/function/src/scalars/numpy/interp.rs b/src/common/function/src/scalars/numpy/interp.rs
index 25a34836bbe8..cacd0729150b 100644
--- a/src/common/function/src/scalars/numpy/interp.rs
+++ b/src/common/function/src/scalars/numpy/interp.rs
@@ -3,16 +3,12 @@ use std::sync::Arc;
use arrow::array::PrimitiveArray;
use arrow::compute::cast::primitive_to_primitive;
use arrow::datatypes::DataType::Float64;
-use datatypes::arrow;
use datatypes::data_type::DataType;
use datatypes::prelude::ScalarVector;
use datatypes::type_id::LogicalTypeId;
use datatypes::value::Value;
-use datatypes::vectors::Float64Vector;
-use datatypes::vectors::PrimitiveVector;
-use datatypes::vectors::Vector;
-use datatypes::vectors::VectorRef;
-use datatypes::with_match_primitive_type_id;
+use datatypes::vectors::{Float64Vector, PrimitiveVector, Vector, VectorRef};
+use datatypes::{arrow, with_match_primitive_type_id};
use snafu::{ensure, Snafu};
#[derive(Debug, Snafu)]
@@ -247,10 +243,8 @@ pub fn interp(args: &[VectorRef]) -> Result<VectorRef> {
mod tests {
use std::sync::Arc;
- use datatypes::{
- prelude::ScalarVectorBuilder,
- vectors::{Int32Vector, Int64Vector, PrimitiveVectorBuilder},
- };
+ use datatypes::prelude::ScalarVectorBuilder;
+ use datatypes::vectors::{Int32Vector, Int64Vector, PrimitiveVectorBuilder};
use super::*;
#[test]
diff --git a/src/common/function/src/scalars/timestamp/from_unixtime.rs b/src/common/function/src/scalars/timestamp/from_unixtime.rs
index 68cb99511455..b3a5472de1a2 100644
--- a/src/common/function/src/scalars/timestamp/from_unixtime.rs
+++ b/src/common/function/src/scalars/timestamp/from_unixtime.rs
@@ -9,8 +9,7 @@ use arrow::scalar::PrimitiveScalar;
use common_query::error::{IntoVectorSnafu, UnsupportedInputDataTypeSnafu};
use common_query::prelude::{Signature, Volatility};
use datatypes::prelude::ConcreteDataType;
-use datatypes::vectors::TimestampVector;
-use datatypes::vectors::VectorRef;
+use datatypes::vectors::{TimestampVector, VectorRef};
use snafu::ResultExt;
use crate::error::Result;
diff --git a/src/common/function/src/scalars/udf.rs b/src/common/function/src/scalars/udf.rs
index 94e176728a83..cef806a985ca 100644
--- a/src/common/function/src/scalars/udf.rs
+++ b/src/common/function/src/scalars/udf.rs
@@ -1,9 +1,8 @@
use std::sync::Arc;
use common_query::error::{ExecuteFunctionSnafu, FromScalarValueSnafu};
-use common_query::prelude::ScalarValue;
use common_query::prelude::{
- ColumnarValue, ReturnTypeFunction, ScalarFunctionImplementation, ScalarUdf,
+ ColumnarValue, ReturnTypeFunction, ScalarFunctionImplementation, ScalarUdf, ScalarValue,
};
use datatypes::error::Error as DataTypeError;
use datatypes::prelude::{ConcreteDataType, VectorHelper};
diff --git a/src/common/grpc/src/channel_manager.rs b/src/common/grpc/src/channel_manager.rs
index a209bb7b109d..ffb45b10eee1 100644
--- a/src/common/grpc/src/channel_manager.rs
+++ b/src/common/grpc/src/channel_manager.rs
@@ -1,14 +1,11 @@
-use std::sync::atomic::AtomicUsize;
-use std::sync::atomic::Ordering;
+use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;
use dashmap::mapref::entry::Entry;
use dashmap::DashMap;
use snafu::ResultExt;
-use tonic::transport::Channel as InnerChannel;
-use tonic::transport::Endpoint;
-use tonic::transport::Uri;
+use tonic::transport::{Channel as InnerChannel, Endpoint, Uri};
use tower::make::MakeConnection;
use crate::error;
diff --git a/src/common/grpc/src/error.rs b/src/common/grpc/src/error.rs
index 55489b1468b3..eb62ac7f2a30 100644
--- a/src/common/grpc/src/error.rs
+++ b/src/common/grpc/src/error.rs
@@ -84,8 +84,7 @@ impl ErrorExt for Error {
#[cfg(test)]
mod tests {
- use snafu::OptionExt;
- use snafu::ResultExt;
+ use snafu::{OptionExt, ResultExt};
use super::*;
diff --git a/src/common/grpc/src/lib.rs b/src/common/grpc/src/lib.rs
index 48abde31392a..62a2f53ecd15 100644
--- a/src/common/grpc/src/lib.rs
+++ b/src/common/grpc/src/lib.rs
@@ -4,7 +4,5 @@ pub mod physical;
pub mod writer;
pub use error::Error;
-pub use physical::{
- plan::{DefaultAsPlanImpl, MockExecution},
- AsExcutionPlan,
-};
+pub use physical::plan::{DefaultAsPlanImpl, MockExecution};
+pub use physical::AsExcutionPlan;
diff --git a/src/common/grpc/src/physical.rs b/src/common/grpc/src/physical.rs
index 2745cadc328d..fc3c6a0061c9 100644
--- a/src/common/grpc/src/physical.rs
+++ b/src/common/grpc/src/physical.rs
@@ -1,7 +1,8 @@
mod expr;
pub mod plan;
-use std::{result::Result, sync::Arc};
+use std::result::Result;
+use std::sync::Arc;
use datafusion::physical_plan::ExecutionPlan;
diff --git a/src/common/grpc/src/physical/expr.rs b/src/common/grpc/src/physical/expr.rs
index 79e75ff6e26d..e6dfe70681fa 100644
--- a/src/common/grpc/src/physical/expr.rs
+++ b/src/common/grpc/src/physical/expr.rs
@@ -1,7 +1,9 @@
-use std::{result::Result, sync::Arc};
+use std::result::Result;
+use std::sync::Arc;
use api::v1::codec;
-use datafusion::physical_plan::{expressions::Column as DfColumn, PhysicalExpr as DfPhysicalExpr};
+use datafusion::physical_plan::expressions::Column as DfColumn;
+use datafusion::physical_plan::PhysicalExpr as DfPhysicalExpr;
use snafu::OptionExt;
use crate::error::{EmptyPhysicalExprSnafu, Error, UnsupportedDfExprSnafu};
@@ -52,8 +54,10 @@ pub(crate) fn parse_df_physical_expr(
mod tests {
use std::sync::Arc;
- use api::v1::codec::{physical_expr_node::ExprType::Column, PhysicalColumn, PhysicalExprNode};
- use datafusion::physical_plan::{expressions::Column as DfColumn, PhysicalExpr};
+ use api::v1::codec::physical_expr_node::ExprType::Column;
+ use api::v1::codec::{PhysicalColumn, PhysicalExprNode};
+ use datafusion::physical_plan::expressions::Column as DfColumn;
+ use datafusion::physical_plan::PhysicalExpr;
use crate::physical::expr::{parse_df_physical_expr, parse_grpc_physical_expr};
diff --git a/src/common/grpc/src/physical/plan.rs b/src/common/grpc/src/physical/plan.rs
index 6914c833dc0a..885c7237d644 100644
--- a/src/common/grpc/src/physical/plan.rs
+++ b/src/common/grpc/src/physical/plan.rs
@@ -1,22 +1,20 @@
-use std::{ops::Deref, result::Result, sync::Arc};
-
-use api::v1::codec::{
- physical_plan_node::PhysicalPlanType, MockInputExecNode, PhysicalPlanNode, ProjectionExecNode,
-};
-use arrow::{
- array::{PrimitiveArray, Utf8Array},
- datatypes::{DataType, Field, Schema},
-};
+use std::ops::Deref;
+use std::result::Result;
+use std::sync::Arc;
+
+use api::v1::codec::physical_plan_node::PhysicalPlanType;
+use api::v1::codec::{MockInputExecNode, PhysicalPlanNode, ProjectionExecNode};
+use arrow::array::{PrimitiveArray, Utf8Array};
+use arrow::datatypes::{DataType, Field, Schema};
use async_trait::async_trait;
-use datafusion::{
- execution::runtime_env::RuntimeEnv,
- field_util::SchemaExt,
- physical_plan::{
- memory::MemoryStream, projection::ProjectionExec, ExecutionPlan, PhysicalExpr,
- SendableRecordBatchStream, Statistics,
- },
- record_batch::RecordBatch,
+use datafusion::execution::runtime_env::RuntimeEnv;
+use datafusion::field_util::SchemaExt;
+use datafusion::physical_plan::memory::MemoryStream;
+use datafusion::physical_plan::projection::ProjectionExec;
+use datafusion::physical_plan::{
+ ExecutionPlan, PhysicalExpr, SendableRecordBatchStream, Statistics,
};
+use datafusion::record_batch::RecordBatch;
use snafu::{OptionExt, ResultExt};
use crate::error::{
@@ -211,12 +209,11 @@ mod tests {
use std::sync::Arc;
use api::v1::codec::PhysicalPlanNode;
- use datafusion::physical_plan::{expressions::Column, projection::ProjectionExec};
+ use datafusion::physical_plan::expressions::Column;
+ use datafusion::physical_plan::projection::ProjectionExec;
- use crate::physical::{
- plan::{DefaultAsPlanImpl, MockExecution},
- {AsExcutionPlan, ExecutionPlanRef},
- };
+ use crate::physical::plan::{DefaultAsPlanImpl, MockExecution};
+ use crate::physical::{AsExcutionPlan, ExecutionPlanRef};
#[test]
fn test_convert_df_projection_with_bytes() {
diff --git a/src/common/grpc/src/writer.rs b/src/common/grpc/src/writer.rs
index 904b710fde2b..364c9a01822d 100644
--- a/src/common/grpc/src/writer.rs
+++ b/src/common/grpc/src/writer.rs
@@ -1,10 +1,8 @@
use std::collections::HashMap;
-use api::v1::{
- codec::InsertBatch,
- column::{SemanticType, Values},
- Column, ColumnDataType,
-};
+use api::v1::codec::InsertBatch;
+use api::v1::column::{SemanticType, Values};
+use api::v1::{Column, ColumnDataType};
use common_base::BitVec;
use snafu::ensure;
@@ -232,7 +230,8 @@ pub enum Precision {
#[cfg(test)]
mod tests {
- use api::v1::{column::SemanticType, ColumnDataType};
+ use api::v1::column::SemanticType;
+ use api::v1::ColumnDataType;
use common_base::BitVec;
use super::LinesWriter;
diff --git a/src/common/insert/src/insert.rs b/src/common/insert/src/insert.rs
index 57247b213ac5..173d9121e0fc 100644
--- a/src/common/insert/src/insert.rs
+++ b/src/common/insert/src/insert.rs
@@ -1,30 +1,24 @@
-use std::collections::HashSet;
-use std::{
- collections::{hash_map::Entry, HashMap},
- ops::Deref,
- sync::Arc,
-};
+use std::collections::hash_map::Entry;
+use std::collections::{HashMap, HashSet};
+use std::ops::Deref;
+use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
-use api::v1::{
- codec::InsertBatch,
- column::{SemanticType, Values},
- AddColumns, Column, ColumnDataType,
-};
-use api::v1::{AddColumn, ColumnDef, CreateExpr};
+use api::v1::codec::InsertBatch;
+use api::v1::column::{SemanticType, Values};
+use api::v1::{AddColumn, AddColumns, Column, ColumnDataType, ColumnDef, CreateExpr};
use common_base::BitVec;
use common_time::timestamp::Timestamp;
-use common_time::Date;
-use common_time::DateTime;
+use common_time::{Date, DateTime};
+use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::{ValueRef, VectorRef};
use datatypes::schema::SchemaRef;
-use datatypes::{data_type::ConcreteDataType, value::Value, vectors::VectorBuilder};
+use datatypes::value::Value;
+use datatypes::vectors::VectorBuilder;
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableId;
-use table::{
- requests::{AddColumnRequest, AlterKind, AlterTableRequest, InsertRequest},
- Table,
-};
+use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, InsertRequest};
+use table::Table;
use crate::error::{
ColumnDataTypeSnafu, ColumnNotFoundSnafu, CreateVectorSnafu, DecodeInsertSnafu,
@@ -455,20 +449,16 @@ mod tests {
use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
- use api::v1::{
- codec::InsertBatch,
- column::{self, SemanticType, Values},
- insert_expr, Column, ColumnDataType,
- };
+ use api::v1::codec::InsertBatch;
+ use api::v1::column::{self, SemanticType, Values};
+ use api::v1::{insert_expr, Column, ColumnDataType};
use common_base::BitVec;
use common_query::physical_plan::PhysicalPlanRef;
use common_query::prelude::Expr;
use common_time::timestamp::Timestamp;
- use datatypes::{
- data_type::ConcreteDataType,
- schema::{ColumnSchema, SchemaBuilder, SchemaRef},
- value::Value,
- };
+ use datatypes::data_type::ConcreteDataType;
+ use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
+ use datatypes::value::Value;
use snafu::ResultExt;
use table::error::Result as TableResult;
use table::metadata::TableInfoRef;
diff --git a/src/common/query/src/columnar_value.rs b/src/common/query/src/columnar_value.rs
index 8f82b62bef58..2e645979c4c3 100644
--- a/src/common/query/src/columnar_value.rs
+++ b/src/common/query/src/columnar_value.rs
@@ -1,7 +1,6 @@
use datafusion_expr::ColumnarValue as DfColumnarValue;
use datatypes::prelude::ConcreteDataType;
-use datatypes::vectors::Helper;
-use datatypes::vectors::VectorRef;
+use datatypes::vectors::{Helper, VectorRef};
use snafu::ResultExt;
use crate::error::{self, IntoVectorSnafu, Result};
diff --git a/src/common/query/src/function.rs b/src/common/query/src/function.rs
index 7f123a3fdf01..bcb4170afffd 100644
--- a/src/common/query/src/function.rs
+++ b/src/common/query/src/function.rs
@@ -102,8 +102,7 @@ pub fn to_df_return_type(func: ReturnTypeFunction) -> DfReturnTypeFunction {
mod tests {
use std::sync::Arc;
- use datatypes::prelude::ScalarVector;
- use datatypes::prelude::Vector;
+ use datatypes::prelude::{ScalarVector, Vector};
use datatypes::vectors::BooleanVector;
use super::*;
diff --git a/src/common/query/src/logical_plan/accumulator.rs b/src/common/query/src/logical_plan/accumulator.rs
index d0c60902efca..4247b61c9798 100644
--- a/src/common/query/src/logical_plan/accumulator.rs
+++ b/src/common/query/src/logical_plan/accumulator.rs
@@ -9,8 +9,7 @@ use datafusion_common::Result as DfResult;
use datafusion_expr::Accumulator as DfAccumulator;
use datatypes::prelude::*;
use datatypes::value::ListValue;
-use datatypes::vectors::Helper as VectorHelper;
-use datatypes::vectors::VectorRef;
+use datatypes::vectors::{Helper as VectorHelper, VectorRef};
use snafu::ResultExt;
use crate::error::{self, Error, FromScalarValueSnafu, IntoVectorSnafu, Result};
diff --git a/src/common/query/src/logical_plan/mod.rs b/src/common/query/src/logical_plan/mod.rs
index c19ca49f54d4..36777ce0c326 100644
--- a/src/common/query/src/logical_plan/mod.rs
+++ b/src/common/query/src/logical_plan/mod.rs
@@ -60,12 +60,12 @@ mod tests {
use arrow::array::BooleanArray;
use arrow::datatypes::DataType;
- use datafusion_expr::ColumnarValue as DfColumnarValue;
- use datafusion_expr::ScalarUDF as DfScalarUDF;
- use datafusion_expr::TypeSignature as DfTypeSignature;
+ use datafusion_expr::{
+ ColumnarValue as DfColumnarValue, ScalarUDF as DfScalarUDF,
+ TypeSignature as DfTypeSignature,
+ };
use datatypes::prelude::*;
- use datatypes::vectors::BooleanVector;
- use datatypes::vectors::VectorRef;
+ use datatypes::vectors::{BooleanVector, VectorRef};
use super::*;
use crate::error::Result;
diff --git a/src/common/query/src/logical_plan/udaf.rs b/src/common/query/src/logical_plan/udaf.rs
index a4e8d867436a..9761fb470afa 100644
--- a/src/common/query/src/logical_plan/udaf.rs
+++ b/src/common/query/src/logical_plan/udaf.rs
@@ -6,9 +6,10 @@ use std::fmt::{self, Debug, Formatter};
use std::sync::Arc;
use arrow::datatypes::DataType as ArrowDataType;
-use datafusion_expr::AccumulatorFunctionImplementation as DfAccumulatorFunctionImplementation;
-use datafusion_expr::AggregateUDF as DfAggregateUdf;
-use datafusion_expr::StateTypeFunction as DfStateTypeFunction;
+use datafusion_expr::{
+ AccumulatorFunctionImplementation as DfAccumulatorFunctionImplementation,
+ AggregateUDF as DfAggregateUdf, StateTypeFunction as DfStateTypeFunction,
+};
use datatypes::prelude::*;
use crate::function::{
diff --git a/src/common/query/src/logical_plan/udf.rs b/src/common/query/src/logical_plan/udf.rs
index 9ba40f69a357..dbd0926243bf 100644
--- a/src/common/query/src/logical_plan/udf.rs
+++ b/src/common/query/src/logical_plan/udf.rs
@@ -1,8 +1,7 @@
//! Udf module contains foundational types that are used to represent UDFs.
//! It's modified from datafusion.
use std::fmt;
-use std::fmt::Debug;
-use std::fmt::Formatter;
+use std::fmt::{Debug, Formatter};
use std::sync::Arc;
use datafusion_expr::{
diff --git a/src/common/query/src/physical_plan.rs b/src/common/query/src/physical_plan.rs
index 2e7972e903b1..a1ebda8083af 100644
--- a/src/common/query/src/physical_plan.rs
+++ b/src/common/query/src/physical_plan.rs
@@ -4,8 +4,7 @@ use std::sync::Arc;
use async_trait::async_trait;
use common_recordbatch::adapter::{DfRecordBatchStreamAdapter, RecordBatchStreamAdapter};
-use common_recordbatch::DfSendableRecordBatchStream;
-use common_recordbatch::SendableRecordBatchStream;
+use common_recordbatch::{DfSendableRecordBatchStream, SendableRecordBatchStream};
use datafusion::arrow::datatypes::SchemaRef as DfSchemaRef;
use datafusion::error::Result as DfResult;
pub use datafusion::execution::runtime_env::RuntimeEnv;
diff --git a/src/common/query/src/prelude.rs b/src/common/query/src/prelude.rs
index 709457e393e0..4ca737456bbc 100644
--- a/src/common/query/src/prelude.rs
+++ b/src/common/query/src/prelude.rs
@@ -2,8 +2,5 @@ pub use datafusion_common::ScalarValue;
pub use crate::columnar_value::ColumnarValue;
pub use crate::function::*;
-pub use crate::logical_plan::create_udf;
-pub use crate::logical_plan::AggregateFunction;
-pub use crate::logical_plan::Expr;
-pub use crate::logical_plan::ScalarUdf;
+pub use crate::logical_plan::{create_udf, AggregateFunction, Expr, ScalarUdf};
pub use crate::signature::{Signature, TypeSignature, Volatility};
diff --git a/src/common/recordbatch/src/adapter.rs b/src/common/recordbatch/src/adapter.rs
index 18bcd7f848c3..afbc79d9e6ed 100644
--- a/src/common/recordbatch/src/adapter.rs
+++ b/src/common/recordbatch/src/adapter.rs
@@ -5,14 +5,14 @@ use std::task::{Context, Poll};
use datafusion::arrow::datatypes::SchemaRef as DfSchemaRef;
use datafusion::physical_plan::RecordBatchStream as DfRecordBatchStream;
use datafusion_common::record_batch::RecordBatch as DfRecordBatch;
-use datatypes::arrow::error::ArrowError;
-use datatypes::arrow::error::Result as ArrowResult;
+use datatypes::arrow::error::{ArrowError, Result as ArrowResult};
use datatypes::schema::{Schema, SchemaRef};
use snafu::ResultExt;
use crate::error::{self, Result};
-use crate::DfSendableRecordBatchStream;
-use crate::{RecordBatch, RecordBatchStream, SendableRecordBatchStream, Stream};
+use crate::{
+ DfSendableRecordBatchStream, RecordBatch, RecordBatchStream, SendableRecordBatchStream, Stream,
+};
/// Greptime SendableRecordBatchStream -> DataFusion RecordBatchStream
pub struct DfRecordBatchStreamAdapter {
diff --git a/src/common/recordbatch/src/util.rs b/src/common/recordbatch/src/util.rs
index 8a030289c57e..8a146be1c0e9 100644
--- a/src/common/recordbatch/src/util.rs
+++ b/src/common/recordbatch/src/util.rs
@@ -1,6 +1,7 @@
use futures::TryStreamExt;
-use crate::{error::Result, RecordBatch, SendableRecordBatchStream};
+use crate::error::Result;
+use crate::{RecordBatch, SendableRecordBatchStream};
pub async fn collect(stream: SendableRecordBatchStream) -> Result<Vec<RecordBatch>> {
stream.try_collect::<Vec<_>>().await
@@ -16,8 +17,7 @@ mod tests {
use datafusion_common::record_batch::RecordBatch as DfRecordBatch;
use datatypes::arrow::array::UInt32Array;
use datatypes::arrow::datatypes::{DataType, Field, Schema as ArrowSchema};
- use datatypes::schema::Schema;
- use datatypes::schema::SchemaRef;
+ use datatypes::schema::{Schema, SchemaRef};
use futures::task::{Context, Poll};
use futures::Stream;
diff --git a/src/common/runtime/src/runtime.rs b/src/common/runtime/src/runtime.rs
index 9c226bd2f997..97949494e0d0 100644
--- a/src/common/runtime/src/runtime.rs
+++ b/src/common/runtime/src/runtime.rs
@@ -1,6 +1,7 @@
+use std::future::Future;
use std::sync::Arc;
use std::thread;
-use std::{future::Future, time::Duration};
+use std::time::Duration;
use metrics::{decrement_gauge, increment_gauge};
use snafu::ResultExt;
@@ -165,7 +166,9 @@ fn on_thread_unpark(thread_name: String) -> impl Fn() + 'static {
#[cfg(test)]
mod tests {
- use std::{sync::Arc, thread, time::Duration};
+ use std::sync::Arc;
+ use std::thread;
+ use std::time::Duration;
use common_telemetry::metric;
use tokio::sync::oneshot;
diff --git a/src/common/substrait/src/df_logical.rs b/src/common/substrait/src/df_logical.rs
index 45adbb748c98..b991004ecd09 100644
--- a/src/common/substrait/src/df_logical.rs
+++ b/src/common/substrait/src/df_logical.rs
@@ -7,21 +7,17 @@ use datafusion::datasource::TableProvider;
use datafusion::logical_plan::{LogicalPlan, TableScan, ToDFSchema};
use datafusion::physical_plan::project_schema;
use prost::Message;
-use snafu::ensure;
-use snafu::{OptionExt, ResultExt};
+use snafu::{ensure, OptionExt, ResultExt};
use substrait_proto::protobuf::expression::mask_expression::{StructItem, StructSelect};
use substrait_proto::protobuf::expression::MaskExpression;
use substrait_proto::protobuf::plan_rel::RelType as PlanRelType;
use substrait_proto::protobuf::read_rel::{NamedTable, ReadType};
use substrait_proto::protobuf::rel::RelType;
-use substrait_proto::protobuf::PlanRel;
-use substrait_proto::protobuf::ReadRel;
-use substrait_proto::protobuf::Rel;
+use substrait_proto::protobuf::{PlanRel, ReadRel, Rel};
use table::table::adapter::DfTableProviderAdapter;
-use crate::error::Error;
use crate::error::{
- DFInternalSnafu, DecodeRelSnafu, EmptyPlanSnafu, EncodeRelSnafu, InternalSnafu,
+ DFInternalSnafu, DecodeRelSnafu, EmptyPlanSnafu, EncodeRelSnafu, Error, InternalSnafu,
InvalidParametersSnafu, MissingFieldSnafu, SchemaNotMatchSnafu, TableNotFoundSnafu,
UnknownPlanSnafu, UnsupportedExprSnafu, UnsupportedPlanSnafu,
};
@@ -334,15 +330,13 @@ impl DFLogicalSubstraitConvertor {
#[cfg(test)]
mod test {
- use catalog::local::LocalCatalogManager;
- use catalog::{
- local::{MemoryCatalogProvider, MemorySchemaProvider},
- CatalogList, CatalogProvider, RegisterTableRequest,
- };
+ use catalog::local::{LocalCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
+ use catalog::{CatalogList, CatalogProvider, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use datafusion::logical_plan::DFSchema;
use datatypes::schema::Schema;
- use table::{requests::CreateTableRequest, test_util::EmptyTable, test_util::MockTableEngine};
+ use table::requests::CreateTableRequest;
+ use table::test_util::{EmptyTable, MockTableEngine};
use super::*;
use crate::schema::test::supported_types;
diff --git a/src/common/substrait/src/types.rs b/src/common/substrait/src/types.rs
index b15f7ed7a062..ba6cea28d10f 100644
--- a/src/common/substrait/src/types.rs
+++ b/src/common/substrait/src/types.rs
@@ -8,8 +8,7 @@ use datatypes::prelude::ConcreteDataType;
use substrait_proto::protobuf::r#type::{self as s_type, Kind, Nullability};
use substrait_proto::protobuf::Type as SType;
-use crate::error::Result;
-use crate::error::{UnsupportedConcreteTypeSnafu, UnsupportedSubstraitTypeSnafu};
+use crate::error::{Result, UnsupportedConcreteTypeSnafu, UnsupportedSubstraitTypeSnafu};
macro_rules! substrait_kind {
($desc:ident, $concrete_ty:ident) => {{
diff --git a/src/common/telemetry/src/lib.rs b/src/common/telemetry/src/lib.rs
index 811b954c2dfc..ed3dd5a79a0c 100644
--- a/src/common/telemetry/src/lib.rs
+++ b/src/common/telemetry/src/lib.rs
@@ -3,12 +3,7 @@ mod macros;
pub mod metric;
mod panic_hook;
-pub use common_error;
-pub use logging::init_default_ut_logging;
-pub use logging::init_global_logging;
+pub use logging::{init_default_ut_logging, init_global_logging};
pub use metric::init_default_metrics_recorder;
pub use panic_hook::set_panic_hook;
-pub use tracing;
-pub use tracing_appender;
-pub use tracing_futures;
-pub use tracing_subscriber;
+pub use {common_error, tracing, tracing_appender, tracing_futures, tracing_subscriber};
diff --git a/src/common/telemetry/src/logging.rs b/src/common/telemetry/src/logging.rs
index a9cd69d81edb..2698e2e3f681 100644
--- a/src/common/telemetry/src/logging.rs
+++ b/src/common/telemetry/src/logging.rs
@@ -1,24 +1,18 @@
//! logging stuff, inspired by databend
use std::env;
-use std::sync::Arc;
-use std::sync::Mutex;
-use std::sync::Once;
+use std::sync::{Arc, Mutex, Once};
use once_cell::sync::Lazy;
use opentelemetry::global;
use opentelemetry::sdk::propagation::TraceContextPropagator;
pub use tracing::{event, span, Level};
use tracing_appender::non_blocking::WorkerGuard;
-use tracing_appender::rolling::RollingFileAppender;
-use tracing_appender::rolling::Rotation;
-use tracing_bunyan_formatter::BunyanFormattingLayer;
-use tracing_bunyan_formatter::JsonStorageLayer;
+use tracing_appender::rolling::{RollingFileAppender, Rotation};
+use tracing_bunyan_formatter::{BunyanFormattingLayer, JsonStorageLayer};
use tracing_log::LogTracer;
-use tracing_subscriber::filter;
use tracing_subscriber::fmt::Layer;
use tracing_subscriber::layer::SubscriberExt;
-use tracing_subscriber::EnvFilter;
-use tracing_subscriber::Registry;
+use tracing_subscriber::{filter, EnvFilter, Registry};
pub use crate::{debug, error, info, log, trace, warn};
diff --git a/src/common/time/src/date.rs b/src/common/time/src/date.rs
index 6f13046afa7c..5f67a4b8a001 100644
--- a/src/common/time/src/date.rs
+++ b/src/common/time/src/date.rs
@@ -6,8 +6,7 @@ use serde::{Deserialize, Serialize};
use serde_json::Value;
use snafu::ResultExt;
-use crate::error::Result;
-use crate::error::{Error, ParseDateStrSnafu};
+use crate::error::{Error, ParseDateStrSnafu, Result};
const UNIX_EPOCH_FROM_CE: i32 = 719_163;
diff --git a/src/common/time/src/timestamp.rs b/src/common/time/src/timestamp.rs
index 4e969158e343..d3538d78b565 100644
--- a/src/common/time/src/timestamp.rs
+++ b/src/common/time/src/timestamp.rs
@@ -3,7 +3,8 @@ use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use std::str::FromStr;
-use chrono::{offset::Local, DateTime, LocalResult, NaiveDateTime, TimeZone, Utc};
+use chrono::offset::Local;
+use chrono::{DateTime, LocalResult, NaiveDateTime, TimeZone, Utc};
use serde::{Deserialize, Serialize};
use crate::error::{Error, ParseTimestampSnafu};
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 426a66c1753e..98bfe85e2014 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -1,19 +1,23 @@
+use std::sync::Arc;
use std::time::Duration;
-use std::{fs, path, sync::Arc};
+use std::{fs, path};
use catalog::remote::MetaKvBackend;
use catalog::CatalogManagerRef;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_telemetry::logging::info;
use frontend::frontend::Mode;
-use log_store::fs::{config::LogConfig, log::LocalFileLogStore};
+use log_store::fs::config::LogConfig;
+use log_store::fs::log::LocalFileLogStore;
use meta_client::client::{MetaClient, MetaClientBuilder};
use meta_client::MetaClientOpts;
use object_store::layers::LoggingLayer;
-use object_store::{services::fs::Builder, util, ObjectStore};
+use object_store::services::fs::Builder;
+use object_store::{util, ObjectStore};
use query::query_engine::{QueryEngineFactory, QueryEngineRef};
use snafu::prelude::*;
-use storage::{config::EngineConfig as StorageEngineConfig, EngineImpl};
+use storage::config::EngineConfig as StorageEngineConfig;
+use storage::EngineImpl;
use table::table::TableIdProviderRef;
use table_engine::config::EngineConfig as TableEngineConfig;
use table_engine::engine::MitoEngine;
diff --git a/src/datanode/src/instance/sql.rs b/src/datanode/src/instance/sql.rs
index 3878ecf5966e..ae5542d75082 100644
--- a/src/datanode/src/instance/sql.rs
+++ b/src/datanode/src/instance/sql.rs
@@ -1,10 +1,8 @@
use async_trait::async_trait;
use common_error::prelude::BoxedError;
use common_query::Output;
-use common_telemetry::{
- logging::{error, info},
- timer,
-};
+use common_telemetry::logging::{error, info};
+use common_telemetry::timer;
use servers::query_handler::SqlQueryHandler;
use snafu::prelude::*;
use sql::statements::statement::Statement;
diff --git a/src/datanode/src/mock.rs b/src/datanode/src/mock.rs
index 942f72c4f66b..4c8720bec8a3 100644
--- a/src/datanode/src/mock.rs
+++ b/src/datanode/src/mock.rs
@@ -24,9 +24,7 @@ impl Instance {
// This method is used in other crates' testing code, so move it out of "cfg(test)".
// TODO(LFC): Delete it when callers no longer need it.
pub async fn new_mock() -> Result<Self> {
- use table_engine::table::test_util::new_test_object_store;
- use table_engine::table::test_util::MockEngine;
- use table_engine::table::test_util::MockMitoEngine;
+ use table_engine::table::test_util::{new_test_object_store, MockEngine, MockMitoEngine};
let mock_info = meta_srv::mocks::mock_with_memstore().await;
let meta_client = Some(Arc::new(mock_meta_client(mock_info, 0).await));
diff --git a/src/datanode/src/server/grpc/ddl.rs b/src/datanode/src/server/grpc/ddl.rs
index 735874651e08..4247d542abf1 100644
--- a/src/datanode/src/server/grpc/ddl.rs
+++ b/src/datanode/src/server/grpc/ddl.rs
@@ -2,13 +2,13 @@ use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
use api::result::AdminResultBuilder;
-use api::v1::{alter_expr::Kind, AdminResult, AlterExpr, ColumnDef, CreateExpr};
+use api::v1::alter_expr::Kind;
+use api::v1::{AdminResult, AlterExpr, ColumnDef, CreateExpr};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::{ErrorExt, StatusCode};
use common_query::Output;
use common_telemetry::{error, info};
-use datatypes::schema::ColumnDefaultConstraint;
-use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
+use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, SchemaBuilder, SchemaRef};
use futures::TryFutureExt;
use snafu::prelude::*;
use table::metadata::TableId;
diff --git a/src/datanode/src/server/grpc/plan.rs b/src/datanode/src/server/grpc/plan.rs
index cf18d43764bc..0a8dd11c9684 100644
--- a/src/datanode/src/server/grpc/plan.rs
+++ b/src/datanode/src/server/grpc/plan.rs
@@ -1,16 +1,13 @@
use std::sync::Arc;
-use common_grpc::AsExcutionPlan;
-use common_grpc::DefaultAsPlanImpl;
-use common_query::physical_plan::PhysicalPlanAdapter;
-use common_query::physical_plan::PhysicalPlanRef;
+use common_grpc::{AsExcutionPlan, DefaultAsPlanImpl};
+use common_query::physical_plan::{PhysicalPlanAdapter, PhysicalPlanRef};
use common_query::Output;
use datatypes::schema::Schema;
use query::QueryEngineRef;
use snafu::ResultExt;
-use crate::error::Result;
-use crate::error::{ConvertSchemaSnafu, ExecutePhysicalPlanSnafu, IntoPhysicalPlanSnafu};
+use crate::error::{ConvertSchemaSnafu, ExecutePhysicalPlanSnafu, IntoPhysicalPlanSnafu, Result};
pub struct PhysicalPlanner {
query_engine: QueryEngineRef,
diff --git a/src/datanode/src/server/grpc/select.rs b/src/datanode/src/server/grpc/select.rs
index 5aa5f412e20c..431fbaf32154 100644
--- a/src/datanode/src/server/grpc/select.rs
+++ b/src/datanode/src/server/grpc/select.rs
@@ -2,7 +2,9 @@ use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
use api::result::{build_err_result, ObjectResultBuilder};
-use api::v1::{codec::SelectResult, column::SemanticType, column::Values, Column, ObjectResult};
+use api::v1::codec::SelectResult;
+use api::v1::column::{SemanticType, Values};
+use api::v1::{Column, ObjectResult};
use arrow::array::{Array, BooleanArray, PrimitiveArray};
use common_base::BitVec;
use common_error::status_code::StatusCode;
@@ -183,18 +185,14 @@ pub fn values(arrays: &[Arc<dyn Array>]) -> Result<Values> {
mod tests {
use std::sync::Arc;
- use arrow::{
- array::{Array, BooleanArray, PrimitiveArray},
- datatypes::{DataType, Field},
- };
+ use arrow::array::{Array, BooleanArray, PrimitiveArray};
+ use arrow::datatypes::{DataType, Field};
use common_recordbatch::{RecordBatch, RecordBatches};
use datafusion::field_util::SchemaExt;
use datatypes::arrow::datatypes::Schema as ArrowSchema;
- use datatypes::{
- arrow_array::StringArray,
- schema::Schema,
- vectors::{UInt32Vector, VectorRef},
- };
+ use datatypes::arrow_array::StringArray;
+ use datatypes::schema::Schema;
+ use datatypes::vectors::{UInt32Vector, VectorRef};
use crate::server::grpc::select::{null_mask, try_convert, values};
diff --git a/src/datanode/src/sql.rs b/src/datanode/src/sql.rs
index 5e3c8fb38bdc..16554fa3144e 100644
--- a/src/datanode/src/sql.rs
+++ b/src/datanode/src/sql.rs
@@ -83,7 +83,8 @@ mod tests {
use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
use datatypes::value::Value;
use log_store::fs::noop::NoopLogStore;
- use object_store::{services::fs::Builder, ObjectStore};
+ use object_store::services::fs::Builder;
+ use object_store::ObjectStore;
use query::QueryEngineFactory;
use sql::statements::statement::Statement;
use storage::config::EngineConfig as StorageEngineConfig;
diff --git a/src/datanode/src/sql/insert.rs b/src/datanode/src/sql/insert.rs
index fd1a7e6fe8fc..673e9a490f0a 100644
--- a/src/datanode/src/sql/insert.rs
+++ b/src/datanode/src/sql/insert.rs
@@ -1,12 +1,10 @@
use catalog::SchemaProviderRef;
use common_query::Output;
-use datatypes::prelude::ConcreteDataType;
-use datatypes::prelude::VectorBuilder;
-use snafu::ensure;
-use snafu::OptionExt;
-use snafu::ResultExt;
+use datatypes::prelude::{ConcreteDataType, VectorBuilder};
+use snafu::{ensure, OptionExt, ResultExt};
use sql::ast::Value as SqlValue;
-use sql::statements::{self, insert::Insert};
+use sql::statements::insert::Insert;
+use sql::statements::{self};
use table::engine::TableReference;
use table::requests::*;
diff --git a/src/datanode/src/tests/grpc_test.rs b/src/datanode/src/tests/grpc_test.rs
index d1235403536e..20865e4a6a86 100644
--- a/src/datanode/src/tests/grpc_test.rs
+++ b/src/datanode/src/tests/grpc_test.rs
@@ -4,11 +4,13 @@ use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
+use api::v1::alter_expr::Kind;
+use api::v1::codec::InsertBatch;
+use api::v1::column::SemanticType;
use api::v1::{
- admin_result, alter_expr::Kind, codec::InsertBatch, column, column::SemanticType, insert_expr,
- AddColumn, AlterExpr, Column, ColumnDef, CreateExpr, InsertExpr, MutateResult,
+ admin_result, column, insert_expr, AddColumn, AddColumns, AlterExpr, Column, ColumnDataType,
+ ColumnDef, CreateExpr, InsertExpr, MutateResult,
};
-use api::v1::{AddColumns, ColumnDataType};
use client::admin::Admin;
use client::{Client, Database, ObjectResult};
use common_catalog::consts::MIN_USER_TABLE_ID;
diff --git a/src/datanode/src/tests/http_test.rs b/src/datanode/src/tests/http_test.rs
index 0f703e1526c6..187cb6414d78 100644
--- a/src/datanode/src/tests/http_test.rs
+++ b/src/datanode/src/tests/http_test.rs
@@ -5,8 +5,7 @@ use axum::Router;
use axum_test_helper::TestClient;
use datatypes::prelude::ConcreteDataType;
use frontend::frontend::FrontendOptions;
-use frontend::instance::FrontendInstance;
-use frontend::instance::Instance as FeInstance;
+use frontend::instance::{FrontendInstance, Instance as FeInstance};
use serde_json::json;
use servers::http::{ColumnSchema, HttpServer, JsonOutput, JsonResponse, Schema};
use test_util::TestGuard;
diff --git a/src/datanode/src/tests/test_util.rs b/src/datanode/src/tests/test_util.rs
index 40c7cb18da39..3c63b844c492 100644
--- a/src/datanode/src/tests/test_util.rs
+++ b/src/datanode/src/tests/test_util.rs
@@ -7,8 +7,7 @@ use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, SchemaBuilder};
use frontend::frontend::Mode;
use snafu::ResultExt;
-use table::engine::EngineContext;
-use table::engine::TableEngineRef;
+use table::engine::{EngineContext, TableEngineRef};
use table::requests::CreateTableRequest;
use table_engine::config::EngineConfig;
use table_engine::table::test_util::{new_test_object_store, MockEngine, MockMitoEngine};
diff --git a/src/datatypes/src/arrow_array.rs b/src/datatypes/src/arrow_array.rs
index 8b8f234ee025..ea982a84d12e 100644
--- a/src/datatypes/src/arrow_array.rs
+++ b/src/datatypes/src/arrow_array.rs
@@ -85,9 +85,9 @@ pub fn arrow_array_get(array: &dyn Array, idx: usize) -> Result<Value> {
#[cfg(test)]
mod test {
- use arrow::array::Int64Array as ArrowI64Array;
- use arrow::array::*;
- use arrow::array::{MutableListArray, MutablePrimitiveArray, TryExtend};
+ use arrow::array::{
+ Int64Array as ArrowI64Array, MutableListArray, MutablePrimitiveArray, TryExtend, *,
+ };
use arrow::buffer::Buffer;
use arrow::datatypes::{DataType, TimeUnit as ArrowTimeUnit};
use common_time::timestamp::{TimeUnit, Timestamp};
diff --git a/src/datatypes/src/data_type.rs b/src/datatypes/src/data_type.rs
index da9c4c8b5bd9..778bf79e1020 100644
--- a/src/datatypes/src/data_type.rs
+++ b/src/datatypes/src/data_type.rs
@@ -8,10 +8,10 @@ use serde::{Deserialize, Serialize};
use crate::error::{self, Error, Result};
use crate::type_id::LogicalTypeId;
use crate::types::{
- BinaryType, BooleanType, DateType, Float32Type, Float64Type, Int16Type, Int32Type, Int64Type,
- Int8Type, ListType, NullType, StringType, UInt16Type, UInt32Type, UInt64Type, UInt8Type,
+ BinaryType, BooleanType, DateTimeType, DateType, Float32Type, Float64Type, Int16Type,
+ Int32Type, Int64Type, Int8Type, ListType, NullType, StringType, TimestampType, UInt16Type,
+ UInt32Type, UInt64Type, UInt8Type,
};
-use crate::types::{DateTimeType, TimestampType};
use crate::value::Value;
use crate::vectors::MutableVector;
diff --git a/src/datatypes/src/types/primitive_type.rs b/src/datatypes/src/types/primitive_type.rs
index ad2d59773dac..19418544f528 100644
--- a/src/datatypes/src/types/primitive_type.rs
+++ b/src/datatypes/src/types/primitive_type.rs
@@ -9,8 +9,7 @@ use snafu::OptionExt;
use crate::data_type::{ConcreteDataType, DataType};
use crate::error::{self, Result};
-use crate::scalars::ScalarVectorBuilder;
-use crate::scalars::{Scalar, ScalarRef};
+use crate::scalars::{Scalar, ScalarRef, ScalarVectorBuilder};
use crate::type_id::LogicalTypeId;
use crate::types::primitive_traits::Primitive;
use crate::value::{Value, ValueRef};
diff --git a/src/datatypes/src/vectors/binary.rs b/src/datatypes/src/vectors/binary.rs
index d4332976e5c4..74b7cf05a347 100644
--- a/src/datatypes/src/vectors/binary.rs
+++ b/src/datatypes/src/vectors/binary.rs
@@ -1,8 +1,7 @@
use std::any::Any;
use std::sync::Arc;
-use arrow::array::{Array, ArrayRef};
-use arrow::array::{BinaryValueIter, MutableArray};
+use arrow::array::{Array, ArrayRef, BinaryValueIter, MutableArray};
use arrow::bitmap::utils::ZipValidity;
use snafu::{OptionExt, ResultExt};
diff --git a/src/datatypes/src/vectors/constant.rs b/src/datatypes/src/vectors/constant.rs
index dcbd8b87b540..d793f7519e88 100644
--- a/src/datatypes/src/vectors/constant.rs
+++ b/src/datatypes/src/vectors/constant.rs
@@ -9,8 +9,7 @@ use crate::data_type::ConcreteDataType;
use crate::error::{Result, SerializeSnafu};
use crate::serialize::Serializable;
use crate::value::{Value, ValueRef};
-use crate::vectors::Helper;
-use crate::vectors::{BooleanVector, Validity, Vector, VectorRef};
+use crate::vectors::{BooleanVector, Helper, Validity, Vector, VectorRef};
#[derive(Clone)]
pub struct ConstantVector {
diff --git a/src/datatypes/src/vectors/null.rs b/src/datatypes/src/vectors/null.rs
index 84dd8f75672b..6f01123e2f9c 100644
--- a/src/datatypes/src/vectors/null.rs
+++ b/src/datatypes/src/vectors/null.rs
@@ -2,8 +2,7 @@ use std::any::Any;
use std::fmt;
use std::sync::Arc;
-use arrow::array::ArrayRef;
-use arrow::array::{Array, NullArray};
+use arrow::array::{Array, ArrayRef, NullArray};
use arrow::datatypes::DataType as ArrowDataType;
use snafu::{ensure, OptionExt};
diff --git a/src/datatypes/src/vectors/primitive.rs b/src/datatypes/src/vectors/primitive.rs
index 3d508ed63914..6c4521b6979d 100644
--- a/src/datatypes/src/vectors/primitive.rs
+++ b/src/datatypes/src/vectors/primitive.rs
@@ -9,10 +9,8 @@ use serde_json::Value as JsonValue;
use snafu::{OptionExt, ResultExt};
use crate::data_type::{ConcreteDataType, DataType};
-use crate::error::ConversionSnafu;
-use crate::error::{Result, SerializeSnafu};
-use crate::scalars::{Scalar, ScalarRef};
-use crate::scalars::{ScalarVector, ScalarVectorBuilder};
+use crate::error::{ConversionSnafu, Result, SerializeSnafu};
+use crate::scalars::{Scalar, ScalarRef, ScalarVector, ScalarVectorBuilder};
use crate::serialize::Serializable;
use crate::types::{Primitive, PrimitiveElement};
use crate::value::{Value, ValueRef};
diff --git a/src/frontend/src/expr_factory.rs b/src/frontend/src/expr_factory.rs
index c3a3ef7a8cac..4e554cbcd60e 100644
--- a/src/frontend/src/expr_factory.rs
+++ b/src/frontend/src/expr_factory.rs
@@ -10,11 +10,9 @@ use sql::statements::create::{CreateTable, TIME_INDEX};
use sql::statements::{column_def_to_schema, table_idents_to_full_name};
use sqlparser::ast::{ColumnDef, TableConstraint};
-use crate::error::InvalidSqlSnafu;
-use crate::error::Result;
use crate::error::{
BuildCreateExprOnInsertionSnafu, ColumnDataTypeSnafu, ConvertColumnDefaultConstraintSnafu,
- ParseSqlSnafu,
+ InvalidSqlSnafu, ParseSqlSnafu, Result,
};
pub type CreateExprFactoryRef = Arc<dyn CreateExprFactory + Send + Sync>;
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 81040efbe40c..375c5e0db574 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -34,10 +34,11 @@ use servers::query_handler::{
PrometheusProtocolHandler, ScriptHandler, ScriptHandlerRef, SqlQueryHandler,
};
use snafu::prelude::*;
+use sql::dialect::GenericDialect;
+use sql::parser::ParserContext;
use sql::statements::create::Partitions;
use sql::statements::insert::Insert;
use sql::statements::statement::Statement;
-use sql::{dialect::GenericDialect, parser::ParserContext};
use crate::catalog::FrontendCatalogManager;
use crate::datanode::DatanodeClients;
@@ -713,10 +714,10 @@ mod tests {
use std::assert_matches::assert_matches;
use api::v1::codec::{InsertBatch, SelectResult};
+ use api::v1::column::SemanticType;
use api::v1::{
- admin_expr, admin_result, column, column::SemanticType, object_expr, object_result,
- select_expr, Column, ColumnDataType, ColumnDef as GrpcColumnDef, ExprHeader, MutateResult,
- SelectExpr,
+ admin_expr, admin_result, column, object_expr, object_result, select_expr, Column,
+ ColumnDataType, ColumnDef as GrpcColumnDef, ExprHeader, MutateResult, SelectExpr,
};
use datatypes::schema::ColumnDefaultConstraint;
use datatypes::value::Value;
diff --git a/src/frontend/src/instance/influxdb.rs b/src/frontend/src/instance/influxdb.rs
index 601a1750f49e..51d31cc8a989 100644
--- a/src/frontend/src/instance/influxdb.rs
+++ b/src/frontend/src/instance/influxdb.rs
@@ -7,8 +7,9 @@ use async_trait::async_trait;
use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_error::prelude::BoxedError;
use common_insert::column_to_vector;
+use servers::error as server_error;
use servers::influxdb::InfluxdbRequest;
-use servers::{error as server_error, query_handler::InfluxdbLineProtocolHandler};
+use servers::query_handler::InfluxdbLineProtocolHandler;
use snafu::{OptionExt, ResultExt};
use table::requests::InsertRequest;
diff --git a/src/frontend/src/instance/prometheus.rs b/src/frontend/src/instance/prometheus.rs
index babc91ef33ce..88b2a19b527c 100644
--- a/src/frontend/src/instance/prometheus.rs
+++ b/src/frontend/src/instance/prometheus.rs
@@ -1,9 +1,7 @@
-use api::prometheus::remote::{
- read_request::ResponseType, Query, QueryResult, ReadRequest, ReadResponse, WriteRequest,
-};
+use api::prometheus::remote::read_request::ResponseType;
+use api::prometheus::remote::{Query, QueryResult, ReadRequest, ReadResponse, WriteRequest};
use async_trait::async_trait;
-use client::ObjectResult;
-use client::{Database, Select};
+use client::{Database, ObjectResult, Select};
use common_error::prelude::BoxedError;
use common_telemetry::logging;
use prost::Message;
@@ -162,9 +160,8 @@ impl PrometheusProtocolHandler for Instance {
#[cfg(test)]
mod tests {
- use api::prometheus::remote::{
- label_matcher::Type as MatcherType, Label, LabelMatcher, Sample,
- };
+ use api::prometheus::remote::label_matcher::Type as MatcherType;
+ use api::prometheus::remote::{Label, LabelMatcher, Sample};
use api::v1::CreateDatabaseExpr;
use super::*;
diff --git a/src/frontend/src/mock.rs b/src/frontend/src/mock.rs
index 3d7b96914d28..d7835d6c7e58 100644
--- a/src/frontend/src/mock.rs
+++ b/src/frontend/src/mock.rs
@@ -4,12 +4,10 @@ use std::fmt::Formatter;
use std::sync::Arc;
use api::v1::InsertExpr;
-use client::ObjectResult;
-use client::{Database, Select};
+use client::{Database, ObjectResult, Select};
use common_query::prelude::Expr;
use common_query::Output;
-use common_recordbatch::util;
-use common_recordbatch::RecordBatches;
+use common_recordbatch::{util, RecordBatches};
use datafusion::logical_plan::{LogicalPlan as DfLogicPlan, LogicalPlanBuilder};
use datafusion_expr::Expr as DfExpr;
use datatypes::prelude::Value;
diff --git a/src/frontend/src/spliter.rs b/src/frontend/src/spliter.rs
index f0db03209e7e..c9ed76f172a5 100644
--- a/src/frontend/src/spliter.rs
+++ b/src/frontend/src/spliter.rs
@@ -1,18 +1,14 @@
use std::collections::HashMap;
use datatypes::value::Value;
-use datatypes::vectors::VectorBuilder;
-use datatypes::vectors::VectorRef;
-use snafu::ensure;
-use snafu::OptionExt;
+use datatypes::vectors::{VectorBuilder, VectorRef};
+use snafu::{ensure, OptionExt};
use store_api::storage::RegionNumber;
use table::requests::InsertRequest;
-use crate::error::Error;
-use crate::error::FindPartitionColumnSnafu;
-use crate::error::FindRegionSnafu;
-use crate::error::InvalidInsertRequestSnafu;
-use crate::error::Result;
+use crate::error::{
+ Error, FindPartitionColumnSnafu, FindRegionSnafu, InvalidInsertRequestSnafu, Result,
+};
use crate::partitioning::PartitionRuleRef;
pub type DistInsertRequest = HashMap<RegionNumber, InsertRequest>;
@@ -159,15 +155,15 @@ fn partition_insert_request(
#[cfg(test)]
mod tests {
use std::any::Any;
- use std::{collections::HashMap, result::Result, sync::Arc};
+ use std::collections::HashMap;
+ use std::result::Result;
+ use std::sync::Arc;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
- use datatypes::{
- data_type::ConcreteDataType,
- types::{BooleanType, StringType},
- value::Value,
- vectors::VectorBuilder,
- };
+ use datatypes::data_type::ConcreteDataType;
+ use datatypes::types::{BooleanType, StringType};
+ use datatypes::value::Value;
+ use datatypes::vectors::VectorBuilder;
use serde::{Deserialize, Serialize};
use store_api::storage::RegionNumber;
use table::requests::InsertRequest;
@@ -176,10 +172,8 @@ mod tests {
check_req, find_partitioning_values, partition_insert_request, partition_values,
WriteSpliter,
};
- use crate::{
- error::Error,
- partitioning::{PartitionExpr, PartitionRule, PartitionRuleRef},
- };
+ use crate::error::Error;
+ use crate::partitioning::{PartitionExpr, PartitionRule, PartitionRuleRef};
#[test]
fn test_insert_req_check() {
diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs
index 2f32847a5d76..c63e6222249a 100644
--- a/src/frontend/src/table.rs
+++ b/src/frontend/src/table.rs
@@ -443,8 +443,7 @@ mod test {
use common_recordbatch::util;
use datafusion::arrow_print;
use datafusion_common::record_batch::RecordBatch as DfRecordBatch;
- use datafusion_expr::expr_fn::col;
- use datafusion_expr::expr_fn::{and, binary_expr, or};
+ use datafusion_expr::expr_fn::{and, binary_expr, col, or};
use datafusion_expr::lit;
use datanode::datanode::{DatanodeOptions, ObjectStoreConfig};
use datanode::instance::Instance;
diff --git a/src/frontend/src/table/insert.rs b/src/frontend/src/table/insert.rs
index 252fbeb2c0b4..b7f3afd7f268 100644
--- a/src/frontend/src/table/insert.rs
+++ b/src/frontend/src/table/insert.rs
@@ -2,19 +2,13 @@ use std::collections::HashMap;
use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
-use api::v1::codec;
use api::v1::codec::InsertBatch;
use api::v1::column::SemanticType;
-use api::v1::insert_expr;
use api::v1::insert_expr::Expr;
-use api::v1::Column;
-use api::v1::InsertExpr;
-use api::v1::MutateResult;
+use api::v1::{codec, insert_expr, Column, InsertExpr, MutateResult};
use client::{Database, ObjectResult};
use datatypes::prelude::ConcreteDataType;
-use snafu::ensure;
-use snafu::OptionExt;
-use snafu::ResultExt;
+use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::RegionNumber;
use table::requests::InsertRequest;
@@ -144,9 +138,13 @@ fn to_insert_expr(region_number: RegionNumber, insert: InsertRequest) -> Result<
mod tests {
use std::collections::HashMap;
- use api::v1::{codec::InsertBatch, insert_expr::Expr, ColumnDataType, InsertExpr};
+ use api::v1::codec::InsertBatch;
+ use api::v1::insert_expr::Expr;
+ use api::v1::{ColumnDataType, InsertExpr};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
- use datatypes::{prelude::ConcreteDataType, types::StringType, vectors::VectorBuilder};
+ use datatypes::prelude::ConcreteDataType;
+ use datatypes::types::StringType;
+ use datatypes::vectors::VectorBuilder;
use table::requests::InsertRequest;
use super::to_insert_expr;
diff --git a/src/log-store/src/fs/chunk.rs b/src/log-store/src/fs/chunk.rs
index 3902801e0099..bd5d07ae5f12 100644
--- a/src/log-store/src/fs/chunk.rs
+++ b/src/log-store/src/fs/chunk.rs
@@ -1,7 +1,6 @@
use std::collections::LinkedList;
-use common_base::buffer::Buffer;
-use common_base::buffer::UnderflowSnafu;
+use common_base::buffer::{Buffer, UnderflowSnafu};
use snafu::ensure;
pub const DEFAULT_CHUNK_SIZE: usize = 4096;
diff --git a/src/log-store/src/fs/file.rs b/src/log-store/src/fs/file.rs
index 8f3afb92b3de..8f153b07f767 100644
--- a/src/log-store/src/fs/file.rs
+++ b/src/log-store/src/fs/file.rs
@@ -5,8 +5,7 @@ use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use async_stream::stream;
-use byteorder::ByteOrder;
-use byteorder::LittleEndian;
+use byteorder::{ByteOrder, LittleEndian};
use bytes::{Bytes, BytesMut};
use common_error::ext::BoxedError;
use common_telemetry::logging::{error, info};
@@ -18,8 +17,7 @@ use store_api::logstore::entry::{Encode, Entry, Id, Offset};
use store_api::logstore::entry_stream::EntryStream;
use store_api::logstore::namespace::Namespace;
use tokio::sync::mpsc::error::TryRecvError;
-use tokio::sync::mpsc::Receiver;
-use tokio::sync::mpsc::Sender as MpscSender;
+use tokio::sync::mpsc::{Receiver, Sender as MpscSender};
use tokio::sync::oneshot::Sender as OneshotSender;
use tokio::sync::{oneshot, Notify};
use tokio::task::JoinHandle;
diff --git a/src/log-store/src/fs/io/unix.rs b/src/log-store/src/fs/io/unix.rs
index 43e38da65ebc..f2912a3b7318 100644
--- a/src/log-store/src/fs/io/unix.rs
+++ b/src/log-store/src/fs/io/unix.rs
@@ -3,8 +3,7 @@ use std::os::unix::fs::FileExt;
use snafu::ResultExt;
-use crate::error::Error;
-use crate::error::IoSnafu;
+use crate::error::{Error, IoSnafu};
pub fn pread_exact(file: &File, buf: &mut [u8], offset: u64) -> Result<(), Error> {
file.read_exact_at(buf, offset as u64).context(IoSnafu)
diff --git a/src/log-store/src/fs/log.rs b/src/log-store/src/fs/log.rs
index 4bca3425a6a1..1019284d0deb 100644
--- a/src/log-store/src/fs/log.rs
+++ b/src/log-store/src/fs/log.rs
@@ -5,8 +5,7 @@ use std::sync::Arc;
use arc_swap::ArcSwap;
use async_stream::stream;
use common_telemetry::{error, info, warn};
-use futures::pin_mut;
-use futures::StreamExt;
+use futures::{pin_mut, StreamExt};
use snafu::{OptionExt, ResultExt};
use store_api::logstore::entry::{Encode, Entry, Id};
use store_api::logstore::entry_stream::SendableEntryStream;
@@ -272,7 +271,8 @@ impl LogStore for LocalFileLogStore {
#[cfg(test)]
mod tests {
use futures_util::StreamExt;
- use rand::{distributions::Alphanumeric, Rng};
+ use rand::distributions::Alphanumeric;
+ use rand::Rng;
use store_api::logstore::entry::Entry;
use tempdir::TempDir;
diff --git a/src/log-store/src/fs/noop.rs b/src/log-store/src/fs/noop.rs
index 2d55d845f565..5842811311fe 100644
--- a/src/log-store/src/fs/noop.rs
+++ b/src/log-store/src/fs/noop.rs
@@ -1,8 +1,11 @@
+use store_api::logstore::entry::Id;
use store_api::logstore::namespace::Id as NamespaceId;
-use store_api::logstore::{entry::Id, LogStore};
+use store_api::logstore::LogStore;
use crate::error::{Error, Result};
-use crate::fs::{entry::EntryImpl, namespace::LocalNamespace, AppendResponseImpl};
+use crate::fs::entry::EntryImpl;
+use crate::fs::namespace::LocalNamespace;
+use crate::fs::AppendResponseImpl;
/// A noop log store, used only for tests
// TODO: Add a test feature
diff --git a/src/log-store/src/test_util/log_store_util.rs b/src/log-store/src/test_util/log_store_util.rs
index b8c4f5fb03c5..2c9d2ad39d6b 100644
--- a/src/log-store/src/test_util/log_store_util.rs
+++ b/src/log-store/src/test_util/log_store_util.rs
@@ -1,6 +1,7 @@
use tempdir::TempDir;
-use crate::fs::{config::LogConfig, log::LocalFileLogStore};
+use crate::fs::config::LogConfig;
+use crate::fs::log::LocalFileLogStore;
/// Create a tmp directory for writing logs, used for tests.
// TODO: Add a test feature
diff --git a/src/meta-client/examples/meta_client.rs b/src/meta-client/examples/meta_client.rs
index c2935b926cfb..421c0c44a753 100644
--- a/src/meta-client/examples/meta_client.rs
+++ b/src/meta-client/examples/meta_client.rs
@@ -1,21 +1,13 @@
use std::time::Duration;
-use api::v1::meta::HeartbeatRequest;
-use api::v1::meta::Peer;
-use common_grpc::channel_manager::ChannelConfig;
-use common_grpc::channel_manager::ChannelManager;
+use api::v1::meta::{HeartbeatRequest, Peer};
+use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use meta_client::client::MetaClientBuilder;
-use meta_client::rpc::BatchPutRequest;
-use meta_client::rpc::CompareAndPutRequest;
-use meta_client::rpc::CreateRequest;
-use meta_client::rpc::DeleteRangeRequest;
-use meta_client::rpc::Partition;
-use meta_client::rpc::PutRequest;
-use meta_client::rpc::RangeRequest;
-use meta_client::rpc::TableName;
-use tracing::event;
-use tracing::subscriber;
-use tracing::Level;
+use meta_client::rpc::{
+ BatchPutRequest, CompareAndPutRequest, CreateRequest, DeleteRangeRequest, Partition,
+ PutRequest, RangeRequest, TableName,
+};
+use tracing::{event, subscriber, Level};
use tracing_subscriber::FmtSubscriber;
fn main() {
diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs
index 72cc4251ad5a..be961c8dd56a 100644
--- a/src/meta-client/src/client.rs
+++ b/src/meta-client/src/client.rs
@@ -3,31 +3,21 @@ mod load_balance;
mod router;
mod store;
-use common_grpc::channel_manager::ChannelConfig;
-use common_grpc::channel_manager::ChannelManager;
+use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_telemetry::info;
use heartbeat::Client as HeartbeatClient;
use router::Client as RouterClient;
use snafu::OptionExt;
use store::Client as StoreClient;
-pub use self::heartbeat::HeartbeatSender;
-pub use self::heartbeat::HeartbeatStream;
+pub use self::heartbeat::{HeartbeatSender, HeartbeatStream};
use crate::error;
use crate::error::Result;
-use crate::rpc::BatchPutRequest;
-use crate::rpc::BatchPutResponse;
-use crate::rpc::CompareAndPutRequest;
-use crate::rpc::CompareAndPutResponse;
-use crate::rpc::CreateRequest;
-use crate::rpc::DeleteRangeRequest;
-use crate::rpc::DeleteRangeResponse;
-use crate::rpc::PutRequest;
-use crate::rpc::PutResponse;
-use crate::rpc::RangeRequest;
-use crate::rpc::RangeResponse;
-use crate::rpc::RouteRequest;
-use crate::rpc::RouteResponse;
+use crate::rpc::{
+ BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, CreateRequest,
+ DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
+ RouteRequest, RouteResponse,
+};
pub type Id = (u64, u64);
@@ -273,17 +263,14 @@ impl MetaClient {
mod tests {
use std::sync::Arc;
- use api::v1::meta::HeartbeatRequest;
- use api::v1::meta::Peer;
+ use api::v1::meta::{HeartbeatRequest, Peer};
use meta_srv::metasrv::Context;
- use meta_srv::selector::Namespace;
- use meta_srv::selector::Selector;
+ use meta_srv::selector::{Namespace, Selector};
use meta_srv::Result as MetaResult;
use super::*;
use crate::mocks;
- use crate::rpc::Partition;
- use crate::rpc::TableName;
+ use crate::rpc::{Partition, TableName};
#[tokio::test]
async fn test_meta_client_builder() {
diff --git a/src/meta-client/src/client/heartbeat.rs b/src/meta-client/src/client/heartbeat.rs
index aef4aad71368..a4cff3fb1694 100644
--- a/src/meta-client/src/client/heartbeat.rs
+++ b/src/meta-client/src/client/heartbeat.rs
@@ -2,18 +2,11 @@ use std::collections::HashSet;
use std::sync::Arc;
use api::v1::meta::heartbeat_client::HeartbeatClient;
-use api::v1::meta::AskLeaderRequest;
-use api::v1::meta::HeartbeatRequest;
-use api::v1::meta::HeartbeatResponse;
-use api::v1::meta::RequestHeader;
+use api::v1::meta::{AskLeaderRequest, HeartbeatRequest, HeartbeatResponse, RequestHeader};
use common_grpc::channel_manager::ChannelManager;
-use common_telemetry::debug;
-use common_telemetry::info;
-use snafu::ensure;
-use snafu::OptionExt;
-use snafu::ResultExt;
-use tokio::sync::mpsc;
-use tokio::sync::RwLock;
+use common_telemetry::{debug, info};
+use snafu::{ensure, OptionExt, ResultExt};
+use tokio::sync::{mpsc, RwLock};
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::Channel;
use tonic::Streaming;
diff --git a/src/meta-client/src/client/router.rs b/src/meta-client/src/client/router.rs
index 52277bb4fe29..5698cf2c41eb 100644
--- a/src/meta-client/src/client/router.rs
+++ b/src/meta-client/src/client/router.rs
@@ -2,18 +2,13 @@ use std::collections::HashSet;
use std::sync::Arc;
use api::v1::meta::router_client::RouterClient;
-use api::v1::meta::CreateRequest;
-use api::v1::meta::RouteRequest;
-use api::v1::meta::RouteResponse;
+use api::v1::meta::{CreateRequest, RouteRequest, RouteResponse};
use common_grpc::channel_manager::ChannelManager;
-use snafu::ensure;
-use snafu::OptionExt;
-use snafu::ResultExt;
+use snafu::{ensure, OptionExt, ResultExt};
use tokio::sync::RwLock;
use tonic::transport::Channel;
-use crate::client::load_balance as lb;
-use crate::client::Id;
+use crate::client::{load_balance as lb, Id};
use crate::error;
use crate::error::Result;
diff --git a/src/meta-client/src/client/store.rs b/src/meta-client/src/client/store.rs
index 36b236c730cf..e9cbfd2fb271 100644
--- a/src/meta-client/src/client/store.rs
+++ b/src/meta-client/src/client/store.rs
@@ -2,25 +2,16 @@ use std::collections::HashSet;
use std::sync::Arc;
use api::v1::meta::store_client::StoreClient;
-use api::v1::meta::BatchPutRequest;
-use api::v1::meta::BatchPutResponse;
-use api::v1::meta::CompareAndPutRequest;
-use api::v1::meta::CompareAndPutResponse;
-use api::v1::meta::DeleteRangeRequest;
-use api::v1::meta::DeleteRangeResponse;
-use api::v1::meta::PutRequest;
-use api::v1::meta::PutResponse;
-use api::v1::meta::RangeRequest;
-use api::v1::meta::RangeResponse;
+use api::v1::meta::{
+ BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse,
+ DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
+};
use common_grpc::channel_manager::ChannelManager;
-use snafu::ensure;
-use snafu::OptionExt;
-use snafu::ResultExt;
+use snafu::{ensure, OptionExt, ResultExt};
use tokio::sync::RwLock;
use tonic::transport::Channel;
-use crate::client::load_balance as lb;
-use crate::client::Id;
+use crate::client::{load_balance as lb, Id};
use crate::error;
use crate::error::Result;
diff --git a/src/meta-client/src/lib.rs b/src/meta-client/src/lib.rs
index 4226cdbea67b..e02c5b921652 100644
--- a/src/meta-client/src/lib.rs
+++ b/src/meta-client/src/lib.rs
@@ -1,5 +1,4 @@
-use serde::Deserialize;
-use serde::Serialize;
+use serde::{Deserialize, Serialize};
pub mod client;
pub mod error;
diff --git a/src/meta-client/src/mocks.rs b/src/meta-client/src/mocks.rs
index 11982241faeb..99faaebb2fc5 100644
--- a/src/meta-client/src/mocks.rs
+++ b/src/meta-client/src/mocks.rs
@@ -2,8 +2,7 @@ use meta_srv::metasrv::SelectorRef;
use meta_srv::mocks as server_mock;
use meta_srv::mocks::MockInfo;
-use crate::client::MetaClient;
-use crate::client::MetaClientBuilder;
+use crate::client::{MetaClient, MetaClientBuilder};
pub async fn mock_client_with_memstore() -> MetaClient {
let mock_info = server_mock::mock_with_memstore().await;
diff --git a/src/meta-client/src/rpc.rs b/src/meta-client/src/rpc.rs
index 5a6d79d4bfdb..2adeb01339ca 100644
--- a/src/meta-client/src/rpc.rs
+++ b/src/meta-client/src/rpc.rs
@@ -4,28 +4,18 @@ pub mod util;
use std::fmt::{Display, Formatter};
-use api::v1::meta::KeyValue as PbKeyValue;
-use api::v1::meta::Peer as PbPeer;
-use api::v1::meta::ResponseHeader as PbResponseHeader;
-use api::v1::meta::TableName as PbTableName;
-pub use router::CreateRequest;
-pub use router::Partition;
-pub use router::Region;
-pub use router::RouteRequest;
-pub use router::RouteResponse;
-pub use router::Table;
-pub use router::TableRoute;
+use api::v1::meta::{
+ KeyValue as PbKeyValue, Peer as PbPeer, ResponseHeader as PbResponseHeader,
+ TableName as PbTableName,
+};
+pub use router::{
+ CreateRequest, Partition, Region, RouteRequest, RouteResponse, Table, TableRoute,
+};
use serde::{Deserialize, Serialize};
-pub use store::BatchPutRequest;
-pub use store::BatchPutResponse;
-pub use store::CompareAndPutRequest;
-pub use store::CompareAndPutResponse;
-pub use store::DeleteRangeRequest;
-pub use store::DeleteRangeResponse;
-pub use store::PutRequest;
-pub use store::PutResponse;
-pub use store::RangeRequest;
-pub use store::RangeResponse;
+pub use store::{
+ BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse,
+ DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
+};
#[derive(Debug, Clone)]
pub struct ResponseHeader(PbResponseHeader);
diff --git a/src/meta-client/src/rpc/router.rs b/src/meta-client/src/rpc/router.rs
index 936fa076fb05..c1eb4a807537 100644
--- a/src/meta-client/src/rpc/router.rs
+++ b/src/meta-client/src/rpc/router.rs
@@ -1,21 +1,15 @@
use std::collections::HashMap;
-use api::v1::meta::CreateRequest as PbCreateRequest;
-use api::v1::meta::Partition as PbPartition;
-use api::v1::meta::Region as PbRegion;
-use api::v1::meta::RouteRequest as PbRouteRequest;
-use api::v1::meta::RouteResponse as PbRouteResponse;
-use api::v1::meta::Table as PbTable;
-use serde::Deserialize;
-use serde::Serialize;
-use serde::Serializer;
+use api::v1::meta::{
+ CreateRequest as PbCreateRequest, Partition as PbPartition, Region as PbRegion,
+ RouteRequest as PbRouteRequest, RouteResponse as PbRouteResponse, Table as PbTable,
+};
+use serde::{Deserialize, Serialize, Serializer};
use snafu::OptionExt;
use crate::error;
use crate::error::Result;
-use crate::rpc::util;
-use crate::rpc::Peer;
-use crate::rpc::TableName;
+use crate::rpc::{util, Peer, TableName};
#[derive(Debug, Clone, Default)]
pub struct RouteRequest {
@@ -266,15 +260,11 @@ impl From<PbPartition> for Partition {
#[cfg(test)]
mod tests {
- use api::v1::meta::Partition as PbPartition;
- use api::v1::meta::Peer as PbPeer;
- use api::v1::meta::Region as PbRegion;
- use api::v1::meta::RegionRoute as PbRegionRoute;
- use api::v1::meta::RouteRequest as PbRouteRequest;
- use api::v1::meta::RouteResponse as PbRouteResponse;
- use api::v1::meta::Table as PbTable;
- use api::v1::meta::TableName as PbTableName;
- use api::v1::meta::TableRoute as PbTableRoute;
+ use api::v1::meta::{
+ Partition as PbPartition, Peer as PbPeer, Region as PbRegion, RegionRoute as PbRegionRoute,
+ RouteRequest as PbRouteRequest, RouteResponse as PbRouteResponse, Table as PbTable,
+ TableName as PbTableName, TableRoute as PbTableRoute,
+ };
use super::*;
diff --git a/src/meta-client/src/rpc/store.rs b/src/meta-client/src/rpc/store.rs
index 4ecb1cb25b7c..9b878523c786 100644
--- a/src/meta-client/src/rpc/store.rs
+++ b/src/meta-client/src/rpc/store.rs
@@ -1,20 +1,15 @@
-use api::v1::meta::BatchPutRequest as PbBatchPutRequest;
-use api::v1::meta::BatchPutResponse as PbBatchPutResponse;
-use api::v1::meta::CompareAndPutRequest as PbCompareAndPutRequest;
-use api::v1::meta::CompareAndPutResponse as PbCompareAndPutResponse;
-use api::v1::meta::DeleteRangeRequest as PbDeleteRangeRequest;
-use api::v1::meta::DeleteRangeResponse as PbDeleteRangeResponse;
-use api::v1::meta::KeyValue as PbKeyValue;
-use api::v1::meta::PutRequest as PbPutRequest;
-use api::v1::meta::PutResponse as PbPutResponse;
-use api::v1::meta::RangeRequest as PbRangeRequest;
-use api::v1::meta::RangeResponse as PbRangeResponse;
+use api::v1::meta::{
+ BatchPutRequest as PbBatchPutRequest, BatchPutResponse as PbBatchPutResponse,
+ CompareAndPutRequest as PbCompareAndPutRequest,
+ CompareAndPutResponse as PbCompareAndPutResponse, DeleteRangeRequest as PbDeleteRangeRequest,
+ DeleteRangeResponse as PbDeleteRangeResponse, KeyValue as PbKeyValue,
+ PutRequest as PbPutRequest, PutResponse as PbPutResponse, RangeRequest as PbRangeRequest,
+ RangeResponse as PbRangeResponse,
+};
use crate::error;
use crate::error::Result;
-use crate::rpc::util;
-use crate::rpc::KeyValue;
-use crate::rpc::ResponseHeader;
+use crate::rpc::{util, KeyValue, ResponseHeader};
#[derive(Debug, Clone, Default)]
pub struct RangeRequest {
@@ -514,17 +509,14 @@ impl DeleteRangeResponse {
#[cfg(test)]
mod tests {
- use api::v1::meta::BatchPutRequest as PbBatchPutRequest;
- use api::v1::meta::BatchPutResponse as PbBatchPutResponse;
- use api::v1::meta::CompareAndPutRequest as PbCompareAndPutRequest;
- use api::v1::meta::CompareAndPutResponse as PbCompareAndPutResponse;
- use api::v1::meta::DeleteRangeRequest as PbDeleteRangeRequest;
- use api::v1::meta::DeleteRangeResponse as PbDeleteRangeResponse;
- use api::v1::meta::KeyValue as PbKeyValue;
- use api::v1::meta::PutRequest as PbPutRequest;
- use api::v1::meta::PutResponse as PbPutResponse;
- use api::v1::meta::RangeRequest as PbRangeRequest;
- use api::v1::meta::RangeResponse as PbRangeResponse;
+ use api::v1::meta::{
+ BatchPutRequest as PbBatchPutRequest, BatchPutResponse as PbBatchPutResponse,
+ CompareAndPutRequest as PbCompareAndPutRequest,
+ CompareAndPutResponse as PbCompareAndPutResponse,
+ DeleteRangeRequest as PbDeleteRangeRequest, DeleteRangeResponse as PbDeleteRangeResponse,
+ KeyValue as PbKeyValue, PutRequest as PbPutRequest, PutResponse as PbPutResponse,
+ RangeRequest as PbRangeRequest, RangeResponse as PbRangeResponse,
+ };
use super::*;
diff --git a/src/meta-srv/examples/kv_store.rs b/src/meta-srv/examples/kv_store.rs
index 707c1df152d1..466cd9c5767e 100644
--- a/src/meta-srv/examples/kv_store.rs
+++ b/src/meta-srv/examples/kv_store.rs
@@ -1,10 +1,6 @@
-use api::v1::meta::DeleteRangeRequest;
-use api::v1::meta::PutRequest;
-use api::v1::meta::RangeRequest;
+use api::v1::meta::{DeleteRangeRequest, PutRequest, RangeRequest};
use meta_srv::service::store::etcd::EtcdStore;
-use tracing::event;
-use tracing::subscriber;
-use tracing::Level;
+use tracing::{event, subscriber, Level};
use tracing_subscriber::FmtSubscriber;
fn main() {
diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs
index 4ce1f847caef..c3616ee18234 100644
--- a/src/meta-srv/src/bootstrap.rs
+++ b/src/meta-srv/src/bootstrap.rs
@@ -7,8 +7,7 @@ use tokio_stream::wrappers::TcpListenerStream;
use crate::election::etcd::EtcdElection;
use crate::error;
-use crate::metasrv::MetaSrv;
-use crate::metasrv::MetaSrvOptions;
+use crate::metasrv::{MetaSrv, MetaSrvOptions};
use crate::service::admin;
use crate::service::store::etcd::EtcdStore;
diff --git a/src/meta-srv/src/election/etcd.rs b/src/meta-srv/src/election/etcd.rs
index e94653e34d2a..d1dcf982bbde 100644
--- a/src/meta-srv/src/election/etcd.rs
+++ b/src/meta-srv/src/election/etcd.rs
@@ -1,22 +1,15 @@
-use std::sync::atomic::AtomicBool;
-use std::sync::atomic::Ordering;
+use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
-use common_telemetry::info;
-use common_telemetry::warn;
+use common_telemetry::{info, warn};
use etcd_client::Client;
-use snafu::OptionExt;
-use snafu::ResultExt;
+use snafu::{OptionExt, ResultExt};
-use crate::election::Election;
-use crate::election::ELECTION_KEY;
-use crate::election::LEASE_SECS;
-use crate::election::PROCLAIM_PERIOD_SECS;
+use crate::election::{Election, ELECTION_KEY, LEASE_SECS, PROCLAIM_PERIOD_SECS};
use crate::error;
use crate::error::Result;
-use crate::metasrv::ElectionRef;
-use crate::metasrv::LeaderValue;
+use crate::metasrv::{ElectionRef, LeaderValue};
pub struct EtcdElection {
leader_value: String,
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index b35e9cd046ac..5817a03f7201 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -1,6 +1,5 @@
use common_error::prelude::*;
-use tonic::Code;
-use tonic::Status;
+use tonic::{Code, Status};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
diff --git a/src/meta-srv/src/handler.rs b/src/meta-srv/src/handler.rs
index de2ca2b50d09..c40dd8ce8d6b 100644
--- a/src/meta-srv/src/handler.rs
+++ b/src/meta-srv/src/handler.rs
@@ -5,9 +5,7 @@ pub(crate) mod response_header;
use std::collections::BTreeMap;
use std::sync::Arc;
-use api::v1::meta::HeartbeatRequest;
-use api::v1::meta::HeartbeatResponse;
-use api::v1::meta::ResponseHeader;
+use api::v1::meta::{HeartbeatRequest, HeartbeatResponse, ResponseHeader};
use common_telemetry::info;
use tokio::sync::mpsc::Sender;
use tokio::sync::RwLock;
diff --git a/src/meta-srv/src/handler/check_leader.rs b/src/meta-srv/src/handler/check_leader.rs
index 67ca01096245..13bae8cfddee 100644
--- a/src/meta-srv/src/handler/check_leader.rs
+++ b/src/meta-srv/src/handler/check_leader.rs
@@ -1,9 +1,7 @@
-use api::v1::meta::Error;
-use api::v1::meta::HeartbeatRequest;
+use api::v1::meta::{Error, HeartbeatRequest};
use crate::error::Result;
-use crate::handler::HeartbeatAccumulator;
-use crate::handler::HeartbeatHandler;
+use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
use crate::metasrv::Context;
pub struct CheckLeaderHandler;
diff --git a/src/meta-srv/src/handler/datanode_lease.rs b/src/meta-srv/src/handler/datanode_lease.rs
index 06ab50a0e814..da33c7d2502e 100644
--- a/src/meta-srv/src/handler/datanode_lease.rs
+++ b/src/meta-srv/src/handler/datanode_lease.rs
@@ -1,13 +1,10 @@
-use api::v1::meta::HeartbeatRequest;
-use api::v1::meta::PutRequest;
+use api::v1::meta::{HeartbeatRequest, PutRequest};
use common_telemetry::info;
use common_time::util as time_util;
use crate::error::Result;
-use crate::handler::HeartbeatAccumulator;
-use crate::handler::HeartbeatHandler;
-use crate::keys::LeaseKey;
-use crate::keys::LeaseValue;
+use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
+use crate::keys::{LeaseKey, LeaseValue};
use crate::metasrv::Context;
pub struct DatanodeLeaseHandler;
@@ -57,9 +54,7 @@ mod tests {
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
- use api::v1::meta::Peer;
- use api::v1::meta::RangeRequest;
- use api::v1::meta::RequestHeader;
+ use api::v1::meta::{Peer, RangeRequest, RequestHeader};
use super::*;
use crate::service::store::memory::MemStore;
diff --git a/src/meta-srv/src/handler/response_header.rs b/src/meta-srv/src/handler/response_header.rs
index be87232fd625..32ab981f85ce 100644
--- a/src/meta-srv/src/handler/response_header.rs
+++ b/src/meta-srv/src/handler/response_header.rs
@@ -1,10 +1,7 @@
-use api::v1::meta::HeartbeatRequest;
-use api::v1::meta::ResponseHeader;
-use api::v1::meta::PROTOCOL_VERSION;
+use api::v1::meta::{HeartbeatRequest, ResponseHeader, PROTOCOL_VERSION};
use crate::error::Result;
-use crate::handler::HeartbeatAccumulator;
-use crate::handler::HeartbeatHandler;
+use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
use crate::metasrv::Context;
pub struct ResponseHeaderHandler;
diff --git a/src/meta-srv/src/keys.rs b/src/meta-srv/src/keys.rs
index ff25d55ba8e9..9b4aff18deed 100644
--- a/src/meta-srv/src/keys.rs
+++ b/src/meta-srv/src/keys.rs
@@ -4,11 +4,8 @@ use api::v1::meta::TableName;
use common_catalog::TableGlobalKey;
use lazy_static::lazy_static;
use regex::Regex;
-use serde::Deserialize;
-use serde::Serialize;
-use snafu::ensure;
-use snafu::OptionExt;
-use snafu::ResultExt;
+use serde::{Deserialize, Serialize};
+use snafu::{ensure, OptionExt, ResultExt};
use crate::error;
use crate::error::Result;
diff --git a/src/meta-srv/src/lease.rs b/src/meta-srv/src/lease.rs
index 1e90e201d2a0..f1d64343f1bb 100644
--- a/src/meta-srv/src/lease.rs
+++ b/src/meta-srv/src/lease.rs
@@ -1,9 +1,7 @@
use api::v1::meta::RangeRequest;
use crate::error::Result;
-use crate::keys::LeaseKey;
-use crate::keys::LeaseValue;
-use crate::keys::DN_LEASE_PREFIX;
+use crate::keys::{LeaseKey, LeaseValue, DN_LEASE_PREFIX};
use crate::service::store::kv::KvStoreRef;
use crate::util;
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 8bd9793a528b..0bc66f55e3de 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -1,12 +1,9 @@
-use std::sync::atomic::AtomicBool;
-use std::sync::atomic::Ordering;
+use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use api::v1::meta::Peer;
-use common_telemetry::info;
-use common_telemetry::warn;
-use serde::Deserialize;
-use serde::Serialize;
+use common_telemetry::{info, warn};
+use serde::{Deserialize, Serialize};
use crate::election::Election;
use crate::handler::check_leader::CheckLeaderHandler;
@@ -15,8 +12,7 @@ use crate::handler::response_header::ResponseHeaderHandler;
use crate::handler::HeartbeatHandlerGroup;
use crate::selector::lease_based::LeaseBasedSelector;
use crate::selector::Selector;
-use crate::sequence::Sequence;
-use crate::sequence::SequenceRef;
+use crate::sequence::{Sequence, SequenceRef};
use crate::service::store::kv::KvStoreRef;
pub const TABLE_ID_SEQ: &str = "table_id";
diff --git a/src/meta-srv/src/mocks.rs b/src/meta-srv/src/mocks.rs
index f1ad3d836c97..34ac85b95c37 100644
--- a/src/meta-srv/src/mocks.rs
+++ b/src/meta-srv/src/mocks.rs
@@ -3,13 +3,10 @@ use std::sync::Arc;
use api::v1::meta::heartbeat_server::HeartbeatServer;
use api::v1::meta::router_server::RouterServer;
use api::v1::meta::store_server::StoreServer;
-use common_grpc::channel_manager::ChannelConfig;
-use common_grpc::channel_manager::ChannelManager;
+use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use tower::service_fn;
-use crate::metasrv::MetaSrv;
-use crate::metasrv::MetaSrvOptions;
-use crate::metasrv::SelectorRef;
+use crate::metasrv::{MetaSrv, MetaSrvOptions, SelectorRef};
use crate::service::store::etcd::EtcdStore;
use crate::service::store::kv::KvStoreRef;
use crate::service::store::memory::MemStore;
diff --git a/src/meta-srv/src/selector/lease_based.rs b/src/meta-srv/src/selector/lease_based.rs
index c4936011b4de..bfbd224120a6 100644
--- a/src/meta-srv/src/selector/lease_based.rs
+++ b/src/meta-srv/src/selector/lease_based.rs
@@ -2,12 +2,10 @@ use api::v1::meta::Peer;
use common_time::util as time_util;
use crate::error::Result;
-use crate::keys::LeaseKey;
-use crate::keys::LeaseValue;
+use crate::keys::{LeaseKey, LeaseValue};
use crate::lease;
use crate::metasrv::Context;
-use crate::selector::Namespace;
-use crate::selector::Selector;
+use crate::selector::{Namespace, Selector};
pub struct LeaseBasedSelector;
diff --git a/src/meta-srv/src/sequence.rs b/src/meta-srv/src/sequence.rs
index 44e89ce4f939..555bcfc77a07 100644
--- a/src/meta-srv/src/sequence.rs
+++ b/src/meta-srv/src/sequence.rs
@@ -134,7 +134,8 @@ mod tests {
use std::sync::Arc;
use super::*;
- use crate::service::store::{kv::KvStore, memory::MemStore};
+ use crate::service::store::kv::KvStore;
+ use crate::service::store::memory::MemStore;
#[tokio::test]
async fn test_sequence() {
diff --git a/src/meta-srv/src/service.rs b/src/meta-srv/src/service.rs
index 0e8fabd8da44..58dc841e6a63 100644
--- a/src/meta-srv/src/service.rs
+++ b/src/meta-srv/src/service.rs
@@ -1,8 +1,7 @@
use std::pin::Pin;
use futures::Stream;
-use tonic::Response;
-use tonic::Status;
+use tonic::{Response, Status};
pub mod admin;
mod heartbeat;
diff --git a/src/meta-srv/src/service/admin.rs b/src/meta-srv/src/service/admin.rs
index 510bcc8fdc86..13e32a6f59fb 100644
--- a/src/meta-srv/src/service/admin.rs
+++ b/src/meta-srv/src/service/admin.rs
@@ -3,14 +3,10 @@ mod health;
use std::collections::HashMap;
use std::convert::Infallible;
use std::sync::Arc;
-use std::task::Context;
-use std::task::Poll;
+use std::task::{Context, Poll};
use tonic::body::BoxBody;
-use tonic::codegen::empty_body;
-use tonic::codegen::http;
-use tonic::codegen::BoxFuture;
-use tonic::codegen::Service;
+use tonic::codegen::{empty_body, http, BoxFuture, Service};
use tonic::transport::NamedService;
use crate::metasrv::MetaSrv;
diff --git a/src/meta-srv/src/service/heartbeat.rs b/src/meta-srv/src/service/heartbeat.rs
index 3d8189ffe9c0..895a4bb9aa61 100644
--- a/src/meta-srv/src/service/heartbeat.rs
+++ b/src/meta-srv/src/service/heartbeat.rs
@@ -1,30 +1,20 @@
use std::io::ErrorKind;
-use std::sync::atomic::AtomicU64;
-use std::sync::atomic::Ordering;
-
-use api::v1::meta::heartbeat_server;
-use api::v1::meta::AskLeaderRequest;
-use api::v1::meta::AskLeaderResponse;
-use api::v1::meta::HeartbeatRequest;
-use api::v1::meta::HeartbeatResponse;
-use api::v1::meta::Peer;
-use api::v1::meta::ResponseHeader;
-use common_telemetry::error;
-use common_telemetry::info;
-use common_telemetry::warn;
+use std::sync::atomic::{AtomicU64, Ordering};
+
+use api::v1::meta::{
+ heartbeat_server, AskLeaderRequest, AskLeaderResponse, HeartbeatRequest, HeartbeatResponse,
+ Peer, ResponseHeader,
+};
+use common_telemetry::{error, info, warn};
use futures::StreamExt;
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
-use tonic::Request;
-use tonic::Response;
-use tonic::Streaming;
+use tonic::{Request, Response, Streaming};
use crate::error;
use crate::error::Result;
-use crate::metasrv::Context;
-use crate::metasrv::MetaSrv;
-use crate::service::GrpcResult;
-use crate::service::GrpcStream;
+use crate::metasrv::{Context, MetaSrv};
+use crate::service::{GrpcResult, GrpcStream};
static PUSHER_ID: AtomicU64 = AtomicU64::new(0);
diff --git a/src/meta-srv/src/service/router.rs b/src/meta-srv/src/service/router.rs
index 976650fc5ba8..cdb34c3ecc96 100644
--- a/src/meta-srv/src/service/router.rs
+++ b/src/meta-srv/src/service/router.rs
@@ -1,31 +1,16 @@
-use api::v1::meta::router_server;
-use api::v1::meta::CreateRequest;
-use api::v1::meta::Error;
-use api::v1::meta::PeerDict;
-use api::v1::meta::PutRequest;
-use api::v1::meta::RangeRequest;
-use api::v1::meta::Region;
-use api::v1::meta::RegionRoute;
-use api::v1::meta::ResponseHeader;
-use api::v1::meta::RouteRequest;
-use api::v1::meta::RouteResponse;
-use api::v1::meta::Table;
-use api::v1::meta::TableRoute;
-use api::v1::meta::TableRouteValue;
-use common_catalog::TableGlobalKey;
-use common_catalog::TableGlobalValue;
+use api::v1::meta::{
+ router_server, CreateRequest, Error, PeerDict, PutRequest, RangeRequest, Region, RegionRoute,
+ ResponseHeader, RouteRequest, RouteResponse, Table, TableRoute, TableRouteValue,
+};
+use common_catalog::{TableGlobalKey, TableGlobalValue};
use common_telemetry::warn;
-use snafu::OptionExt;
-use snafu::ResultExt;
-use tonic::Request;
-use tonic::Response;
+use snafu::{OptionExt, ResultExt};
+use tonic::{Request, Response};
use crate::error;
use crate::error::Result;
use crate::keys::TableRouteKey;
-use crate::metasrv::Context;
-use crate::metasrv::MetaSrv;
-use crate::metasrv::SelectorRef;
+use crate::metasrv::{Context, MetaSrv, SelectorRef};
use crate::sequence::SequenceRef;
use crate::service::store::kv::KvStoreRef;
use crate::service::GrpcResult;
diff --git a/src/meta-srv/src/service/store.rs b/src/meta-srv/src/service/store.rs
index 47096a067391..beef0408cc80 100644
--- a/src/meta-srv/src/service/store.rs
+++ b/src/meta-srv/src/service/store.rs
@@ -2,19 +2,11 @@ pub mod etcd;
pub mod kv;
pub mod memory;
-use api::v1::meta::store_server;
-use api::v1::meta::BatchPutRequest;
-use api::v1::meta::BatchPutResponse;
-use api::v1::meta::CompareAndPutRequest;
-use api::v1::meta::CompareAndPutResponse;
-use api::v1::meta::DeleteRangeRequest;
-use api::v1::meta::DeleteRangeResponse;
-use api::v1::meta::PutRequest;
-use api::v1::meta::PutResponse;
-use api::v1::meta::RangeRequest;
-use api::v1::meta::RangeResponse;
-use tonic::Request;
-use tonic::Response;
+use api::v1::meta::{
+ store_server, BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse,
+ DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
+};
+use tonic::{Request, Response};
use crate::metasrv::MetaSrv;
use crate::service::GrpcResult;
diff --git a/src/meta-srv/src/service/store/etcd.rs b/src/meta-srv/src/service/store/etcd.rs
index d7023cd48e6d..bf1573a7da9f 100644
--- a/src/meta-srv/src/service/store/etcd.rs
+++ b/src/meta-srv/src/service/store/etcd.rs
@@ -1,32 +1,18 @@
use std::sync::Arc;
-use api::v1::meta::BatchPutRequest;
-use api::v1::meta::BatchPutResponse;
-use api::v1::meta::CompareAndPutRequest;
-use api::v1::meta::CompareAndPutResponse;
-use api::v1::meta::DeleteRangeRequest;
-use api::v1::meta::DeleteRangeResponse;
-use api::v1::meta::KeyValue;
-use api::v1::meta::PutRequest;
-use api::v1::meta::PutResponse;
-use api::v1::meta::RangeRequest;
-use api::v1::meta::RangeResponse;
-use api::v1::meta::ResponseHeader;
+use api::v1::meta::{
+ BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse,
+ DeleteRangeRequest, DeleteRangeResponse, KeyValue, PutRequest, PutResponse, RangeRequest,
+ RangeResponse, ResponseHeader,
+};
use common_error::prelude::*;
-use etcd_client::Client;
-use etcd_client::Compare;
-use etcd_client::CompareOp;
-use etcd_client::DeleteOptions;
-use etcd_client::GetOptions;
-use etcd_client::PutOptions;
-use etcd_client::Txn;
-use etcd_client::TxnOp;
-use etcd_client::TxnOpResponse;
+use etcd_client::{
+ Client, Compare, CompareOp, DeleteOptions, GetOptions, PutOptions, Txn, TxnOp, TxnOpResponse,
+};
use crate::error;
use crate::error::Result;
-use crate::service::store::kv::KvStore;
-use crate::service::store::kv::KvStoreRef;
+use crate::service::store::kv::{KvStore, KvStoreRef};
#[derive(Clone)]
pub struct EtcdStore {
diff --git a/src/meta-srv/src/service/store/kv.rs b/src/meta-srv/src/service/store/kv.rs
index 98978ba63e45..52bdf072ada4 100644
--- a/src/meta-srv/src/service/store/kv.rs
+++ b/src/meta-srv/src/service/store/kv.rs
@@ -1,15 +1,9 @@
use std::sync::Arc;
-use api::v1::meta::BatchPutRequest;
-use api::v1::meta::BatchPutResponse;
-use api::v1::meta::CompareAndPutRequest;
-use api::v1::meta::CompareAndPutResponse;
-use api::v1::meta::DeleteRangeRequest;
-use api::v1::meta::DeleteRangeResponse;
-use api::v1::meta::PutRequest;
-use api::v1::meta::PutResponse;
-use api::v1::meta::RangeRequest;
-use api::v1::meta::RangeResponse;
+use api::v1::meta::{
+ BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse,
+ DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
+};
use crate::error::Result;
diff --git a/src/meta-srv/src/service/store/memory.rs b/src/meta-srv/src/service/store/memory.rs
index d68d90f4368d..7caf219d2578 100644
--- a/src/meta-srv/src/service/store/memory.rs
+++ b/src/meta-srv/src/service/store/memory.rs
@@ -3,18 +3,11 @@ use std::collections::BTreeMap;
use std::ops::Range;
use std::sync::Arc;
-use api::v1::meta::BatchPutRequest;
-use api::v1::meta::BatchPutResponse;
-use api::v1::meta::CompareAndPutRequest;
-use api::v1::meta::CompareAndPutResponse;
-use api::v1::meta::DeleteRangeRequest;
-use api::v1::meta::DeleteRangeResponse;
-use api::v1::meta::KeyValue;
-use api::v1::meta::PutRequest;
-use api::v1::meta::PutResponse;
-use api::v1::meta::RangeRequest;
-use api::v1::meta::RangeResponse;
-use api::v1::meta::ResponseHeader;
+use api::v1::meta::{
+ BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse,
+ DeleteRangeRequest, DeleteRangeResponse, KeyValue, PutRequest, PutResponse, RangeRequest,
+ RangeResponse, ResponseHeader,
+};
use parking_lot::RwLock;
use crate::error::Result;
diff --git a/src/object-store/src/lib.rs b/src/object-store/src/lib.rs
index 03e47feb3752..83545a4e7d5e 100644
--- a/src/object-store/src/lib.rs
+++ b/src/object-store/src/lib.rs
@@ -1,6 +1,7 @@
+pub use opendal::io_util::SeekableReader;
pub use opendal::{
- io_util::SeekableReader, layers, services, Accessor, DirEntry, DirStreamer, Layer, Object,
- ObjectMetadata, ObjectMode, Operator as ObjectStore,
+ layers, services, Accessor, DirEntry, DirStreamer, Layer, Object, ObjectMetadata, ObjectMode,
+ Operator as ObjectStore,
};
pub mod backend;
pub mod util;
diff --git a/src/object-store/tests/object_store_test.rs b/src/object-store/tests/object_store_test.rs
index 920934d71f58..cb229b43a20d 100644
--- a/src/object-store/tests/object_store_test.rs
+++ b/src/object-store/tests/object_store_test.rs
@@ -2,10 +2,8 @@ use std::env;
use anyhow::Result;
use common_telemetry::logging;
-use object_store::{
- backend::{fs, s3},
- util, DirStreamer, Object, ObjectMode, ObjectStore,
-};
+use object_store::backend::{fs, s3};
+use object_store::{util, DirStreamer, Object, ObjectMode, ObjectStore};
use tempdir::TempDir;
async fn test_object_crud(store: &ObjectStore) -> Result<()> {
diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs
index 246f6bc3644d..ded7e41b2ab7 100644
--- a/src/query/src/datafusion.rs
+++ b/src/query/src/datafusion.rs
@@ -10,32 +10,30 @@ use catalog::CatalogListRef;
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
use common_function::scalars::udf::create_udf;
use common_function::scalars::FunctionRef;
-use common_query::physical_plan::PhysicalPlanAdapter;
-use common_query::physical_plan::{DfPhysicalPlanAdapter, PhysicalPlan};
-use common_query::{prelude::ScalarUdf, Output};
+use common_query::physical_plan::{DfPhysicalPlanAdapter, PhysicalPlan, PhysicalPlanAdapter};
+use common_query::prelude::ScalarUdf;
+use common_query::Output;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{EmptyRecordBatchStream, SendableRecordBatchStream};
use common_telemetry::timer;
use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec;
use datafusion::physical_plan::ExecutionPlan;
use snafu::{OptionExt, ResultExt};
+use sql::dialect::GenericDialect;
+use sql::parser::ParserContext;
use sql::statements::statement::Statement;
-use sql::{dialect::GenericDialect, parser::ParserContext};
pub use crate::datafusion::catalog_adapter::DfCatalogListAdapter;
-use crate::metric;
+use crate::datafusion::planner::{DfContextProviderAdapter, DfPlanner};
+use crate::error::Result;
+use crate::executor::QueryExecutor;
+use crate::logical_optimizer::LogicalOptimizer;
+use crate::physical_optimizer::PhysicalOptimizer;
+use crate::physical_planner::PhysicalPlanner;
+use crate::plan::LogicalPlan;
+use crate::planner::Planner;
use crate::query_engine::{QueryContext, QueryEngineState};
-use crate::{
- datafusion::planner::{DfContextProviderAdapter, DfPlanner},
- error::Result,
- executor::QueryExecutor,
- logical_optimizer::LogicalOptimizer,
- physical_optimizer::PhysicalOptimizer,
- physical_planner::PhysicalPlanner,
- plan::LogicalPlan,
- planner::Planner,
- QueryEngine,
-};
+use crate::{metric, QueryEngine};
pub(crate) struct DatafusionQueryEngine {
state: QueryEngineState,
@@ -238,8 +236,7 @@ mod tests {
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_query::Output;
use common_recordbatch::util;
- use datafusion::field_util::FieldExt;
- use datafusion::field_util::SchemaExt;
+ use datafusion::field_util::{FieldExt, SchemaExt};
use table::table::numbers::NumbersTable;
use crate::query_engine::{QueryEngineFactory, QueryEngineRef};
diff --git a/src/query/src/datafusion/catalog_adapter.rs b/src/query/src/datafusion/catalog_adapter.rs
index ab4cfdceddef..220b5f0c7ab1 100644
--- a/src/query/src/datafusion/catalog_adapter.rs
+++ b/src/query/src/datafusion/catalog_adapter.rs
@@ -7,17 +7,15 @@ use catalog::error::Error;
use catalog::{
CatalogListRef, CatalogProvider, CatalogProviderRef, SchemaProvider, SchemaProviderRef,
};
-use datafusion::catalog::{
- catalog::{CatalogList as DfCatalogList, CatalogProvider as DfCatalogProvider},
- schema::SchemaProvider as DfSchemaProvider,
+use datafusion::catalog::catalog::{
+ CatalogList as DfCatalogList, CatalogProvider as DfCatalogProvider,
};
+use datafusion::catalog::schema::SchemaProvider as DfSchemaProvider;
use datafusion::datasource::TableProvider as DfTableProvider;
use datafusion::error::Result as DataFusionResult;
use snafu::ResultExt;
-use table::{
- table::adapter::{DfTableProviderAdapter, TableAdapter},
- TableRef,
-};
+use table::table::adapter::{DfTableProviderAdapter, TableAdapter};
+use table::TableRef;
use crate::datafusion::error;
diff --git a/src/query/src/datafusion/planner.rs b/src/query/src/datafusion/planner.rs
index 0c3a989cfbaa..5cbed99d3fbf 100644
--- a/src/query/src/datafusion/planner.rs
+++ b/src/query/src/datafusion/planner.rs
@@ -11,8 +11,11 @@ use snafu::ResultExt;
use sql::statements::query::Query;
use sql::statements::statement::Statement;
+use crate::datafusion::error;
+use crate::error::Result;
+use crate::plan::LogicalPlan;
+use crate::planner::Planner;
use crate::query_engine::QueryEngineState;
-use crate::{datafusion::error, error::Result, plan::LogicalPlan, planner::Planner};
pub struct DfPlanner<'a, S: ContextProvider> {
sql_to_rel: SqlToRel<'a, S>,
diff --git a/src/query/src/executor.rs b/src/query/src/executor.rs
index bc17ddd126ce..0d0e45c304ac 100644
--- a/src/query/src/executor.rs
+++ b/src/query/src/executor.rs
@@ -3,7 +3,8 @@ use std::sync::Arc;
use common_query::physical_plan::PhysicalPlan;
use common_recordbatch::SendableRecordBatchStream;
-use crate::{error::Result, query_engine::QueryContext};
+use crate::error::Result;
+use crate::query_engine::QueryContext;
/// Executor to run [ExecutionPlan].
#[async_trait::async_trait]
diff --git a/src/query/src/optimizer.rs b/src/query/src/optimizer.rs
index 8d878d48037c..9bd012af9669 100644
--- a/src/query/src/optimizer.rs
+++ b/src/query/src/optimizer.rs
@@ -13,8 +13,7 @@ use datafusion::logical_plan::{
};
use datafusion::optimizer::optimizer::OptimizerRule;
use datafusion::optimizer::utils;
-use datafusion_common::Result;
-use datafusion_common::{DFSchemaRef, DataFusionError, ScalarValue};
+use datafusion_common::{DFSchemaRef, DataFusionError, Result, ScalarValue};
/// TypeConversionRule converts some literal values in the logical plan to other types according
/// to the data types of the corresponding columns.
diff --git a/src/query/src/physical_optimizer.rs b/src/query/src/physical_optimizer.rs
index ad449043fca6..65bff044eac5 100644
--- a/src/query/src/physical_optimizer.rs
+++ b/src/query/src/physical_optimizer.rs
@@ -2,7 +2,8 @@ use std::sync::Arc;
use common_query::physical_plan::PhysicalPlan;
-use crate::{error::Result, query_engine::QueryContext};
+use crate::error::Result;
+use crate::query_engine::QueryContext;
pub trait PhysicalOptimizer {
fn optimize_physical_plan(
diff --git a/src/query/src/planner.rs b/src/query/src/planner.rs
index 0814a037116c..413f13833534 100644
--- a/src/query/src/planner.rs
+++ b/src/query/src/planner.rs
@@ -1,6 +1,7 @@
use sql::statements::statement::Statement;
-use crate::{error::Result, plan::LogicalPlan};
+use crate::error::Result;
+use crate::plan::LogicalPlan;
/// SQL logical planner.
pub trait Planner: Send + Sync {
diff --git a/src/query/tests/argmax_test.rs b/src/query/tests/argmax_test.rs
index f8947f77a49f..fb9192ef1f60 100644
--- a/src/query/tests/argmax_test.rs
+++ b/src/query/tests/argmax_test.rs
@@ -3,8 +3,7 @@ mod function;
use common_query::Output;
use common_recordbatch::error::Result as RecordResult;
use common_recordbatch::{util, RecordBatch};
-use datafusion::field_util::FieldExt;
-use datafusion::field_util::SchemaExt;
+use datafusion::field_util::{FieldExt, SchemaExt};
use datatypes::for_all_primitive_types;
use datatypes::prelude::*;
use datatypes::types::PrimitiveElement;
diff --git a/src/query/tests/argmin_test.rs b/src/query/tests/argmin_test.rs
index 34334b1341eb..d4b025d0ffc0 100644
--- a/src/query/tests/argmin_test.rs
+++ b/src/query/tests/argmin_test.rs
@@ -4,8 +4,7 @@ mod function;
use common_query::Output;
use common_recordbatch::error::Result as RecordResult;
use common_recordbatch::{util, RecordBatch};
-use datafusion::field_util::FieldExt;
-use datafusion::field_util::SchemaExt;
+use datafusion::field_util::{FieldExt, SchemaExt};
use datatypes::for_all_primitive_types;
use datatypes::prelude::*;
use datatypes::types::PrimitiveElement;
diff --git a/src/query/tests/mean_test.rs b/src/query/tests/mean_test.rs
index cebd5b835303..f43f2cd8a9e4 100644
--- a/src/query/tests/mean_test.rs
+++ b/src/query/tests/mean_test.rs
@@ -4,8 +4,7 @@ mod function;
use common_query::Output;
use common_recordbatch::error::Result as RecordResult;
use common_recordbatch::{util, RecordBatch};
-use datafusion::field_util::FieldExt;
-use datafusion::field_util::SchemaExt;
+use datafusion::field_util::{FieldExt, SchemaExt};
use datatypes::for_all_primitive_types;
use datatypes::prelude::*;
use datatypes::types::PrimitiveElement;
diff --git a/src/query/tests/my_sum_udaf_example.rs b/src/query/tests/my_sum_udaf_example.rs
index b28d8e9ddf45..b3ccb2a489bb 100644
--- a/src/query/tests/my_sum_udaf_example.rs
+++ b/src/query/tests/my_sum_udaf_example.rs
@@ -7,10 +7,8 @@ use catalog::{CatalogList, CatalogProvider, SchemaProvider};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_function::scalars::aggregate::AggregateFunctionMeta;
use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
-use common_query::error::CreateAccumulatorSnafu;
-use common_query::error::Result as QueryResult;
-use common_query::logical_plan::Accumulator;
-use common_query::logical_plan::AggregateFunctionCreator;
+use common_query::error::{CreateAccumulatorSnafu, Result as QueryResult};
+use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
use common_query::Output;
use common_recordbatch::{util, RecordBatch};
@@ -18,8 +16,7 @@ use datafusion::arrow_print;
use datafusion_common::record_batch::RecordBatch as DfRecordBatch;
use datatypes::prelude::*;
use datatypes::schema::{ColumnSchema, Schema};
-use datatypes::types::PrimitiveElement;
-use datatypes::types::PrimitiveType;
+use datatypes::types::{PrimitiveElement, PrimitiveType};
use datatypes::vectors::PrimitiveVector;
use datatypes::with_match_primitive_type_id;
use num_traits::AsPrimitive;
diff --git a/src/query/tests/percentile_test.rs b/src/query/tests/percentile_test.rs
index 56e17d2575d1..1ef674705e74 100644
--- a/src/query/tests/percentile_test.rs
+++ b/src/query/tests/percentile_test.rs
@@ -6,8 +6,7 @@ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_query::Output;
use common_recordbatch::error::Result as RecordResult;
use common_recordbatch::{util, RecordBatch};
-use datafusion::field_util::FieldExt;
-use datafusion::field_util::SchemaExt;
+use datafusion::field_util::{FieldExt, SchemaExt};
use datatypes::for_all_primitive_types;
use datatypes::prelude::*;
use datatypes::schema::{ColumnSchema, Schema};
diff --git a/src/query/tests/polyval_test.rs b/src/query/tests/polyval_test.rs
index 602e51889ed6..5b37849dc339 100644
--- a/src/query/tests/polyval_test.rs
+++ b/src/query/tests/polyval_test.rs
@@ -4,8 +4,7 @@ mod function;
use common_query::Output;
use common_recordbatch::error::Result as RecordResult;
use common_recordbatch::{util, RecordBatch};
-use datafusion::field_util::FieldExt;
-use datafusion::field_util::SchemaExt;
+use datafusion::field_util::{FieldExt, SchemaExt};
use datatypes::for_all_primitive_types;
use datatypes::prelude::*;
use datatypes::types::PrimitiveElement;
diff --git a/src/query/tests/pow.rs b/src/query/tests/pow.rs
index a93d1f5b816d..09af588d49fe 100644
--- a/src/query/tests/pow.rs
+++ b/src/query/tests/pow.rs
@@ -1,8 +1,7 @@
use std::sync::Arc;
use common_query::error::Result;
-use datatypes::prelude::ScalarVector;
-use datatypes::prelude::Vector;
+use datatypes::prelude::{ScalarVector, Vector};
use datatypes::vectors::{UInt32Vector, VectorRef};
pub fn pow(args: &[VectorRef]) -> Result<VectorRef> {
diff --git a/src/query/tests/query_engine_test.rs b/src/query/tests/query_engine_test.rs
index d0af4ab9022c..1a515158e883 100644
--- a/src/query/tests/query_engine_test.rs
+++ b/src/query/tests/query_engine_test.rs
@@ -10,8 +10,7 @@ use common_query::prelude::{create_udf, make_scalar_function, Volatility};
use common_query::Output;
use common_recordbatch::error::Result as RecordResult;
use common_recordbatch::{util, RecordBatch};
-use datafusion::field_util::FieldExt;
-use datafusion::field_util::SchemaExt;
+use datafusion::field_util::{FieldExt, SchemaExt};
use datafusion::logical_plan::LogicalPlanBuilder;
use datatypes::for_all_primitive_types;
use datatypes::prelude::*;
diff --git a/src/query/tests/scipy_stats_norm_cdf_test.rs b/src/query/tests/scipy_stats_norm_cdf_test.rs
index a53502b1676b..ae531527e59d 100644
--- a/src/query/tests/scipy_stats_norm_cdf_test.rs
+++ b/src/query/tests/scipy_stats_norm_cdf_test.rs
@@ -4,8 +4,7 @@ mod function;
use common_query::Output;
use common_recordbatch::error::Result as RecordResult;
use common_recordbatch::{util, RecordBatch};
-use datafusion::field_util::FieldExt;
-use datafusion::field_util::SchemaExt;
+use datafusion::field_util::{FieldExt, SchemaExt};
use datatypes::for_all_primitive_types;
use datatypes::prelude::*;
use datatypes::types::PrimitiveElement;
diff --git a/src/query/tests/scipy_stats_norm_pdf.rs b/src/query/tests/scipy_stats_norm_pdf.rs
index d99dca4cb2ef..b744f38afa30 100644
--- a/src/query/tests/scipy_stats_norm_pdf.rs
+++ b/src/query/tests/scipy_stats_norm_pdf.rs
@@ -4,8 +4,7 @@ mod function;
use common_query::Output;
use common_recordbatch::error::Result as RecordResult;
use common_recordbatch::{util, RecordBatch};
-use datafusion::field_util::FieldExt;
-use datafusion::field_util::SchemaExt;
+use datafusion::field_util::{FieldExt, SchemaExt};
use datatypes::for_all_primitive_types;
use datatypes::prelude::*;
use datatypes::types::PrimitiveElement;
diff --git a/src/script/src/manager.rs b/src/script/src/manager.rs
index 1b476f5fc53c..d1ed208c6d16 100644
--- a/src/script/src/manager.rs
+++ b/src/script/src/manager.rs
@@ -88,8 +88,10 @@ mod tests {
use super::*;
type DefaultEngine = MitoEngine<EngineImpl<LocalFileLogStore>>;
- use log_store::fs::{config::LogConfig, log::LocalFileLogStore};
- use storage::{config::EngineConfig as StorageEngineConfig, EngineImpl};
+ use log_store::fs::config::LogConfig;
+ use log_store::fs::log::LocalFileLogStore;
+ use storage::config::EngineConfig as StorageEngineConfig;
+ use storage::EngineImpl;
use table_engine::engine::MitoEngine;
use tempdir::TempDir;
diff --git a/src/script/src/python/builtins/mod.rs b/src/script/src/python/builtins/mod.rs
index fcce9c71e390..a7b03485dd3d 100644
--- a/src/script/src/python/builtins/mod.rs
+++ b/src/script/src/python/builtins/mod.rs
@@ -12,12 +12,8 @@ use datatypes::arrow::array::ArrayRef;
use datatypes::arrow::compute::cast::CastOptions;
use datatypes::arrow::datatypes::DataType;
use datatypes::vectors::Helper as HelperVec;
-use rustpython_vm::builtins::PyList;
-use rustpython_vm::pymodule;
-use rustpython_vm::{
- builtins::{PyBaseExceptionRef, PyBool, PyFloat, PyInt, PyStr},
- AsObject, PyObjectRef, PyPayload, PyResult, VirtualMachine,
-};
+use rustpython_vm::builtins::{PyBaseExceptionRef, PyBool, PyFloat, PyInt, PyList, PyStr};
+use rustpython_vm::{pymodule, AsObject, PyObjectRef, PyPayload, PyResult, VirtualMachine};
use crate::python::utils::is_instance;
use crate::python::PyVector;
@@ -274,45 +270,31 @@ pub(crate) mod greptime_builtin {
    // P.S.: not extracted to a separate file because the non-inlined proc macro attribute is *unstable*
use std::sync::Arc;
- use common_function::scalars::{
- function::FunctionContext, math::PowFunction, Function, FunctionRef, FUNCTION_REGISTRY,
- };
- use datafusion::{
- arrow::{
- compute::comparison::{gt_eq_scalar, lt_eq_scalar},
- datatypes::DataType,
- error::ArrowError,
- scalar::{PrimitiveScalar, Scalar},
- },
- physical_plan::expressions,
- };
+ use common_function::scalars::function::FunctionContext;
+ use common_function::scalars::math::PowFunction;
+ use common_function::scalars::{Function, FunctionRef, FUNCTION_REGISTRY};
+ use datafusion::arrow::compute::comparison::{gt_eq_scalar, lt_eq_scalar};
+ use datafusion::arrow::datatypes::DataType;
+ use datafusion::arrow::error::ArrowError;
+ use datafusion::arrow::scalar::{PrimitiveScalar, Scalar};
+ use datafusion::physical_plan::expressions;
use datafusion_expr::ColumnarValue as DFColValue;
use datafusion_physical_expr::math_expressions;
- use datatypes::vectors::{ConstantVector, Float64Vector, Helper, Int64Vector};
- use datatypes::{
- arrow::{
- self,
- array::{ArrayRef, NullArray},
- compute,
- },
- vectors::VectorRef,
- };
+ use datatypes::arrow::array::{ArrayRef, NullArray};
+ use datatypes::arrow::{self, compute};
+ use datatypes::vectors::{ConstantVector, Float64Vector, Helper, Int64Vector, VectorRef};
use paste::paste;
- use rustpython_vm::{
- builtins::{PyFloat, PyFunction, PyInt, PyStr},
- function::{FuncArgs, KwArgs, OptionalArg},
- AsObject, PyObjectRef, PyPayload, PyRef, PyResult, VirtualMachine,
- };
+ use rustpython_vm::builtins::{PyFloat, PyFunction, PyInt, PyStr};
+ use rustpython_vm::function::{FuncArgs, KwArgs, OptionalArg};
+ use rustpython_vm::{AsObject, PyObjectRef, PyPayload, PyRef, PyResult, VirtualMachine};
use crate::python::builtins::{
all_to_f64, eval_aggr_fn, from_df_err, try_into_columnar_value, try_into_py_obj,
type_cast_error,
};
- use crate::python::{
- utils::{is_instance, py_vec_obj_to_array, PyVectorRef},
- vector::val_to_pyobj,
- PyVector,
- };
+ use crate::python::utils::{is_instance, py_vec_obj_to_array, PyVectorRef};
+ use crate::python::vector::val_to_pyobj;
+ use crate::python::PyVector;
#[pyfunction]
fn vector(args: OptionalArg<PyObjectRef>, vm: &VirtualMachine) -> PyResult<PyVector> {
diff --git a/src/script/src/python/builtins/test.rs b/src/script/src/python/builtins/test.rs
index 1d310abdcc5c..50618cd0353d 100644
--- a/src/script/src/python/builtins/test.rs
+++ b/src/script/src/python/builtins/test.rs
@@ -1,25 +1,24 @@
-use std::{collections::HashMap, fs::File, io::Read, path::Path, sync::Arc};
+use std::collections::HashMap;
+use std::fs::File;
+use std::io::Read;
+use std::path::Path;
+use std::sync::Arc;
-use arrow::{
- array::{Float64Array, Int64Array, PrimitiveArray},
- compute::cast::CastOptions,
- datatypes::DataType,
-};
+use arrow::array::{Float64Array, Int64Array, PrimitiveArray};
+use arrow::compute::cast::CastOptions;
+use arrow::datatypes::DataType;
use datatypes::vectors::VectorRef;
use ron::from_str as from_ron_string;
+use rustpython_vm::builtins::{PyFloat, PyInt, PyList};
use rustpython_vm::class::PyClassImpl;
-use rustpython_vm::{
- builtins::{PyFloat, PyInt, PyList},
- convert::ToPyObject,
- scope::Scope,
- AsObject, PyObjectRef, VirtualMachine,
-};
+use rustpython_vm::convert::ToPyObject;
+use rustpython_vm::scope::Scope;
+use rustpython_vm::{AsObject, PyObjectRef, VirtualMachine};
use serde::{Deserialize, Serialize};
-use super::greptime_builtin;
-use super::*;
-use crate::python::utils::format_py_error;
-use crate::python::{utils::is_instance, PyVector};
+use super::{greptime_builtin, *};
+use crate::python::utils::{format_py_error, is_instance};
+use crate::python::PyVector;
#[test]
fn convert_scalar_to_py_obj_and_back() {
rustpython_vm::Interpreter::with_init(Default::default(), |vm| {
diff --git a/src/script/src/python/coprocessor.rs b/src/script/src/python/coprocessor.rs
index c47fd7578fd9..39e527d3c4e6 100644
--- a/src/script/src/python/coprocessor.rs
+++ b/src/script/src/python/coprocessor.rs
@@ -12,11 +12,11 @@ use datatypes::arrow::array::{Array, ArrayRef};
use datatypes::arrow::compute::cast::CastOptions;
use datatypes::arrow::datatypes::{DataType, Field, Schema as ArrowSchema};
use datatypes::schema::Schema;
-use datatypes::vectors::Helper;
-use datatypes::vectors::{BooleanVector, StringVector, Vector, VectorRef};
+use datatypes::vectors::{BooleanVector, Helper, StringVector, Vector, VectorRef};
use rustpython_bytecode::CodeObject;
use rustpython_vm as vm;
-use rustpython_vm::{class::PyClassImpl, AsObject};
+use rustpython_vm::class::PyClassImpl;
+use rustpython_vm::AsObject;
#[cfg(test)]
use serde::Deserialize;
use snafu::{OptionExt, ResultExt};
@@ -29,8 +29,8 @@ use crate::python::coprocessor::parse::DecoratorArgs;
use crate::python::error::{
ensure, ret_other_error_with, ArrowSnafu, OtherSnafu, Result, TypeCastSnafu,
};
-use crate::python::utils::{format_py_error, py_vec_obj_to_array};
-use crate::python::{utils::is_instance, PyVector};
+use crate::python::utils::{format_py_error, is_instance, py_vec_obj_to_array};
+use crate::python::PyVector;
#[cfg_attr(test, derive(Deserialize))]
#[derive(Debug, Clone, PartialEq, Eq)]
diff --git a/src/script/src/python/coprocessor/compile.rs b/src/script/src/python/coprocessor/compile.rs
index 196684d711fd..151a0dbd882a 100644
--- a/src/script/src/python/coprocessor/compile.rs
+++ b/src/script/src/python/coprocessor/compile.rs
@@ -2,11 +2,8 @@
use rustpython_bytecode::CodeObject;
use rustpython_compiler_core::compile as python_compile;
-use rustpython_parser::{
- ast,
- ast::{Located, Location},
- parser,
-};
+use rustpython_parser::ast::{Located, Location};
+use rustpython_parser::{ast, parser};
use snafu::ResultExt;
use crate::fail_parse_error;
diff --git a/src/script/src/python/coprocessor/parse.rs b/src/script/src/python/coprocessor/parse.rs
index c3f479721899..f329a002e246 100644
--- a/src/script/src/python/coprocessor/parse.rs
+++ b/src/script/src/python/coprocessor/parse.rs
@@ -1,18 +1,13 @@
use std::collections::HashSet;
use datatypes::arrow::datatypes::DataType;
-use rustpython_parser::{
- ast,
- ast::{Arguments, Location},
- parser,
-};
+use rustpython_parser::ast::{Arguments, Location};
+use rustpython_parser::{ast, parser};
#[cfg(test)]
use serde::Deserialize;
use snafu::{OptionExt, ResultExt};
-use crate::python::coprocessor::compile;
-use crate::python::coprocessor::AnnotationInfo;
-use crate::python::coprocessor::Coprocessor;
+use crate::python::coprocessor::{compile, AnnotationInfo, Coprocessor};
use crate::python::error::{ensure, CoprParseSnafu, PyParseSnafu, Result};
#[cfg_attr(test, derive(Deserialize))]
diff --git a/src/script/src/python/engine.rs b/src/script/src/python/engine.rs
index acdee232ad37..1f52a541c932 100644
--- a/src/script/src/python/engine.rs
+++ b/src/script/src/python/engine.rs
@@ -7,10 +7,8 @@ use std::task::{Context, Poll};
use async_trait::async_trait;
use common_error::prelude::BoxedError;
use common_query::Output;
-use common_recordbatch::{
- error::ExternalSnafu, error::Result as RecordBatchResult, RecordBatch, RecordBatchStream,
- SendableRecordBatchStream,
-};
+use common_recordbatch::error::{ExternalSnafu, Result as RecordBatchResult};
+use common_recordbatch::{RecordBatch, RecordBatchStream, SendableRecordBatchStream};
use datatypes::schema::SchemaRef;
use futures::Stream;
use query::QueryEngineRef;
@@ -18,11 +16,8 @@ use snafu::{ensure, ResultExt};
use sql::statements::statement::Statement;
use crate::engine::{CompileContext, EvalContext, Script, ScriptEngine};
-use crate::python::coprocessor::{exec_parsed, parse};
-use crate::python::{
- coprocessor::CoprocessorRef,
- error::{self, Result},
-};
+use crate::python::coprocessor::{exec_parsed, parse, CoprocessorRef};
+use crate::python::error::{self, Result};
const PY_ENGINE: &str = "python";
@@ -137,10 +132,8 @@ mod tests {
use catalog::{CatalogList, CatalogProvider, SchemaProvider};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_recordbatch::util;
- use datafusion_common::field_util::FieldExt;
- use datafusion_common::field_util::SchemaExt;
- use datatypes::arrow::array::Float64Array;
- use datatypes::arrow::array::Int64Array;
+ use datafusion_common::field_util::{FieldExt, SchemaExt};
+ use datatypes::arrow::array::{Float64Array, Int64Array};
use query::QueryEngineFactory;
use table::table::numbers::NumbersTable;
diff --git a/src/script/src/python/error.rs b/src/script/src/python/error.rs
index 43c7c46c135e..8c418af44d8d 100644
--- a/src/script/src/python/error.rs
+++ b/src/script/src/python/error.rs
@@ -5,9 +5,11 @@ use datatypes::arrow::error::ArrowError;
use datatypes::error::Error as DataTypeError;
use query::error::Error as QueryError;
use rustpython_compiler_core::error::CompileError as CoreCompileError;
-use rustpython_parser::{ast::Location, error::ParseError};
+use rustpython_parser::ast::Location;
+use rustpython_parser::error::ParseError;
pub use snafu::ensure;
-use snafu::{prelude::Snafu, Backtrace};
+use snafu::prelude::Snafu;
+use snafu::Backtrace;
pub type Result<T> = std::result::Result<T, Error>;
pub(crate) fn ret_other_error_with(reason: String) -> OtherSnafu<String> {
diff --git a/src/script/src/python/test.rs b/src/script/src/python/test.rs
index b307d8568379..6cca4eb7b685 100644
--- a/src/script/src/python/test.rs
+++ b/src/script/src/python/test.rs
@@ -15,11 +15,10 @@ use rustpython_parser::parser;
use serde::{Deserialize, Serialize};
use super::error::{get_error_reason_loc, visualize_loc};
-use crate::python::coprocessor::AnnotationInfo;
-use crate::python::error::pretty_print_error_in_src;
-use crate::python::{
- coprocessor, coprocessor::parse::parse_and_compile_copr, coprocessor::Coprocessor, error::Error,
-};
+use crate::python::coprocessor;
+use crate::python::coprocessor::parse::parse_and_compile_copr;
+use crate::python::coprocessor::{AnnotationInfo, Coprocessor};
+use crate::python::error::{pretty_print_error_in_src, Error};
#[derive(Deserialize, Debug)]
struct TestCase {
diff --git a/src/script/src/python/utils.rs b/src/script/src/python/utils.rs
index 4b06e552f6c2..5e4415ea3eb3 100644
--- a/src/script/src/python/utils.rs
+++ b/src/script/src/python/utils.rs
@@ -4,16 +4,13 @@ use datafusion::arrow::array::{ArrayRef, BooleanArray, NullArray, PrimitiveArray
use datafusion_common::ScalarValue;
use datafusion_expr::ColumnarValue as DFColValue;
use datatypes::arrow::datatypes::DataType;
-use rustpython_vm::builtins::{PyBool, PyFloat, PyInt, PyList, PyStr};
-use rustpython_vm::{builtins::PyBaseExceptionRef, PyObjectRef, PyPayload, PyRef, VirtualMachine};
-use snafu::OptionExt;
-use snafu::ResultExt;
-use snafu::{Backtrace, GenerateImplicitData};
+use rustpython_vm::builtins::{PyBaseExceptionRef, PyBool, PyFloat, PyInt, PyList, PyStr};
+use rustpython_vm::{PyObjectRef, PyPayload, PyRef, VirtualMachine};
+use snafu::{Backtrace, GenerateImplicitData, OptionExt, ResultExt};
use crate::python::builtins::try_into_columnar_value;
-use crate::python::error;
use crate::python::error::ret_other_error_with;
-use crate::python::PyVector;
+use crate::python::{error, PyVector};
pub(crate) type PyVectorRef = PyRef<PyVector>;
diff --git a/src/script/src/python/vector.rs b/src/script/src/python/vector.rs
index c3b583002b02..021b51ddbc58 100644
--- a/src/script/src/python/vector.rs
+++ b/src/script/src/python/vector.rs
@@ -4,40 +4,27 @@ use std::sync::Arc;
use common_time::date::Date;
use common_time::datetime::DateTime;
use common_time::timestamp::Timestamp;
-use datatypes::arrow;
-use datatypes::arrow::array::BooleanArray;
+use datatypes::arrow::array::{Array, ArrayRef, BooleanArray, PrimitiveArray};
use datatypes::arrow::compute;
+use datatypes::arrow::compute::cast::{self, CastOptions};
+use datatypes::arrow::compute::{arithmetics, comparison};
use datatypes::arrow::datatypes::DataType;
use datatypes::arrow::scalar::{PrimitiveScalar, Scalar};
-use datatypes::arrow::{
- array::{Array, ArrayRef, PrimitiveArray},
- compute::{
- arithmetics,
- cast::{self, CastOptions},
- comparison,
- },
-};
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::Value;
use datatypes::value::OrderedFloat;
-use datatypes::{
- value,
- vectors::{Helper, NullVector, VectorBuilder, VectorRef},
-};
-use rustpython_vm::function::{Either, PyComparisonValue};
-use rustpython_vm::types::{Comparable, PyComparisonOp};
+use datatypes::vectors::{Helper, NullVector, VectorBuilder, VectorRef};
+use datatypes::{arrow, value};
+use rustpython_vm::builtins::{PyBaseExceptionRef, PyBool, PyBytes, PyFloat, PyInt, PyNone, PyStr};
+use rustpython_vm::function::{Either, OptionalArg, PyComparisonValue};
+use rustpython_vm::protocol::{PyMappingMethods, PySequenceMethods};
+use rustpython_vm::sliceable::{SaturatedSlice, SequenceIndex, SequenceIndexOp};
+use rustpython_vm::types::{AsMapping, AsSequence, Comparable, PyComparisonOp};
use rustpython_vm::{
- builtins::{PyBaseExceptionRef, PyBool, PyBytes, PyFloat, PyInt, PyNone, PyStr},
- function::OptionalArg,
- protocol::{PyMappingMethods, PySequenceMethods},
- pyclass, pyimpl,
- sliceable::{SaturatedSlice, SequenceIndex, SequenceIndexOp},
- types::{AsMapping, AsSequence},
- AsObject, PyObject, PyObjectRef, PyPayload, PyRef, PyResult, VirtualMachine,
+ pyclass, pyimpl, AsObject, PyObject, PyObjectRef, PyPayload, PyRef, PyResult, VirtualMachine,
};
-use crate::python::utils::is_instance;
-use crate::python::utils::PyVectorRef;
+use crate::python::utils::{is_instance, PyVectorRef};
#[pyclass(module = false, name = "vector")]
#[derive(PyPayload, Debug)]
@@ -1033,7 +1020,9 @@ pub mod tests {
use std::sync::Arc;
use datatypes::vectors::{Float32Vector, Int32Vector, NullVector};
- use rustpython_vm::{builtins::PyList, class::PyClassImpl, protocol::PySequence};
+ use rustpython_vm::builtins::PyList;
+ use rustpython_vm::class::PyClassImpl;
+ use rustpython_vm::protocol::PySequence;
use value::Value;
use super::*;
diff --git a/src/script/src/table.rs b/src/script/src/table.rs
index bd45875bb15a..fa5b4b462402 100644
--- a/src/script/src/table.rs
+++ b/src/script/src/table.rs
@@ -10,8 +10,7 @@ use common_telemetry::logging;
use common_time::timestamp::Timestamp;
use common_time::util;
use datatypes::arrow::array::Utf8Array;
-use datatypes::prelude::ConcreteDataType;
-use datatypes::prelude::ScalarVector;
+use datatypes::prelude::{ConcreteDataType, ScalarVector};
use datatypes::schema::{ColumnSchema, Schema, SchemaBuilder};
use datatypes::vectors::{StringVector, TimestampVector, VectorRef};
use query::QueryEngineRef;
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 9c144f0d0996..51fc7e73f785 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -2,10 +2,8 @@ use std::any::Any;
use std::net::SocketAddr;
use axum::http::StatusCode as HttpStatusCode;
-use axum::{
- response::{IntoResponse, Response},
- Json,
-};
+use axum::response::{IntoResponse, Response};
+use axum::Json;
use common_error::prelude::*;
use serde_json::json;
diff --git a/src/servers/src/grpc.rs b/src/servers/src/grpc.rs
index b7a4599610d7..97588f529248 100644
--- a/src/servers/src/grpc.rs
+++ b/src/servers/src/grpc.rs
@@ -8,8 +8,7 @@ use async_trait::async_trait;
use common_runtime::Runtime;
use common_telemetry::logging::info;
use futures::FutureExt;
-use snafu::ensure;
-use snafu::ResultExt;
+use snafu::{ensure, ResultExt};
use tokio::net::TcpListener;
use tokio::sync::oneshot::{self, Sender};
use tokio::sync::Mutex;
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index ea5f36b7cad8..fc913ce28656 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -9,14 +9,13 @@ use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
-use aide::axum::routing as apirouting;
-use aide::axum::{ApiRouter, IntoApiResponse};
+use aide::axum::{routing as apirouting, ApiRouter, IntoApiResponse};
use aide::openapi::{Info, OpenApi, Server as OpenAPIServer};
use async_trait::async_trait;
+use axum::error_handling::HandleErrorLayer;
use axum::middleware::{self};
-use axum::response::Html;
-use axum::Extension;
-use axum::{error_handling::HandleErrorLayer, response::Json, routing, BoxError, Router};
+use axum::response::{Html, Json};
+use axum::{routing, BoxError, Extension, Router};
use common_error::prelude::ErrorExt;
use common_error::status_code::StatusCode;
use common_query::Output;
@@ -27,19 +26,18 @@ use futures::FutureExt;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_json::Value;
-use snafu::ensure;
-use snafu::ResultExt;
+use snafu::{ensure, ResultExt};
use tokio::sync::oneshot::{self, Sender};
use tokio::sync::Mutex;
-use tower::{timeout::TimeoutLayer, ServiceBuilder};
+use tower::timeout::TimeoutLayer;
+use tower::ServiceBuilder;
use tower_http::trace::TraceLayer;
use self::influxdb::influxdb_write;
use crate::error::{AlreadyStartedSnafu, Result, StartHttpSnafu};
-use crate::query_handler::SqlQueryHandlerRef;
use crate::query_handler::{
InfluxdbLineProtocolHandlerRef, OpentsdbProtocolHandlerRef, PrometheusProtocolHandlerRef,
- ScriptHandlerRef,
+ ScriptHandlerRef, SqlQueryHandlerRef,
};
use crate::server::Server;
diff --git a/src/servers/src/http/context.rs b/src/servers/src/http/context.rs
index dd1a84315dc0..8a2050a1a120 100644
--- a/src/servers/src/http/context.rs
+++ b/src/servers/src/http/context.rs
@@ -1,9 +1,7 @@
-use axum::{
- http,
- http::{Request, StatusCode},
- middleware::Next,
- response::Response,
-};
+use axum::http;
+use axum::http::{Request, StatusCode};
+use axum::middleware::Next;
+use axum::response::Response;
use common_telemetry::error;
use crate::context::{AuthMethod, Channel, CtxBuilder};
diff --git a/src/servers/src/http/influxdb.rs b/src/servers/src/http/influxdb.rs
index 9a561fac92d1..5b8695e22630 100644
--- a/src/servers/src/http/influxdb.rs
+++ b/src/servers/src/http/influxdb.rs
@@ -5,8 +5,7 @@ use axum::http::StatusCode;
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_grpc::writer::Precision;
-use crate::error::Result;
-use crate::error::TimePrecisionSnafu;
+use crate::error::{Result, TimePrecisionSnafu};
use crate::influxdb::InfluxdbRequest;
use crate::query_handler::InfluxdbLineProtocolHandlerRef;
diff --git a/src/servers/src/http/prometheus.rs b/src/servers/src/http/prometheus.rs
index e286bcf75c35..02a62973e741 100644
--- a/src/servers/src/http/prometheus.rs
+++ b/src/servers/src/http/prometheus.rs
@@ -1,7 +1,6 @@
use api::prometheus::remote::{ReadRequest, WriteRequest};
use axum::extract::{Query, RawBody, State};
-use axum::http::header;
-use axum::http::StatusCode;
+use axum::http::{header, StatusCode};
use axum::response::IntoResponse;
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use hyper::Body;
diff --git a/src/servers/src/influxdb.rs b/src/servers/src/influxdb.rs
index d09029bbc997..c8e755600c4d 100644
--- a/src/servers/src/influxdb.rs
+++ b/src/servers/src/influxdb.rs
@@ -1,9 +1,7 @@
use std::collections::HashMap;
-use api::v1::{
- insert_expr::{self, Expr},
- InsertExpr,
-};
+use api::v1::insert_expr::{self, Expr};
+use api::v1::InsertExpr;
use common_grpc::writer::{LinesWriter, Precision};
use influxdb_line_protocol::{parse_lines, FieldValue};
use snafu::ResultExt;
@@ -168,17 +166,18 @@ impl TryFrom<&InfluxdbRequest> for Vec<InsertExpr> {
#[cfg(test)]
mod tests {
- use std::{ops::Deref, sync::Arc};
-
- use api::v1::{
- codec::InsertBatch,
- column::{SemanticType, Values},
- insert_expr::Expr,
- Column, ColumnDataType, InsertExpr,
- };
+ use std::ops::Deref;
+ use std::sync::Arc;
+
+ use api::v1::codec::InsertBatch;
+ use api::v1::column::{SemanticType, Values};
+ use api::v1::insert_expr::Expr;
+ use api::v1::{Column, ColumnDataType, InsertExpr};
use common_base::BitVec;
- use common_time::{timestamp::TimeUnit, Timestamp};
- use datatypes::{value::Value, vectors::Vector};
+ use common_time::timestamp::TimeUnit;
+ use common_time::Timestamp;
+ use datatypes::value::Value;
+ use datatypes::vectors::Vector;
use table::requests::InsertRequest;
use crate::influxdb::InfluxdbRequest;
diff --git a/src/servers/src/line_writer.rs b/src/servers/src/line_writer.rs
index a2196b45b6da..e11ede723c05 100644
--- a/src/servers/src/line_writer.rs
+++ b/src/servers/src/line_writer.rs
@@ -2,13 +2,12 @@ use std::collections::HashMap;
use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_grpc::writer::{to_ms_ts, Precision};
-use common_time::{timestamp::TimeUnit::Millisecond, Timestamp};
-use datatypes::{
- prelude::ConcreteDataType,
- types::TimestampType,
- value::Value,
- vectors::{VectorBuilder, VectorRef},
-};
+use common_time::timestamp::TimeUnit::Millisecond;
+use common_time::Timestamp;
+use datatypes::prelude::ConcreteDataType;
+use datatypes::types::TimestampType;
+use datatypes::value::Value;
+use datatypes::vectors::{VectorBuilder, VectorRef};
use table::requests::InsertRequest;
type ColumnLen = usize;
@@ -137,7 +136,8 @@ mod tests {
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_time::Timestamp;
- use datatypes::{value::Value, vectors::Vector};
+ use datatypes::value::Value;
+ use datatypes::vectors::Vector;
use crate::line_writer::{LineWriter, Precision};
diff --git a/src/servers/src/mysql/handler.rs b/src/servers/src/mysql/handler.rs
index 620fd799ba6e..d200e306d8b4 100644
--- a/src/servers/src/mysql/handler.rs
+++ b/src/servers/src/mysql/handler.rs
@@ -3,11 +3,9 @@ use std::sync::Arc;
use async_trait::async_trait;
use common_telemetry::error;
-use opensrv_mysql::AsyncMysqlShim;
-use opensrv_mysql::ErrorKind;
-use opensrv_mysql::ParamParser;
-use opensrv_mysql::QueryResultWriter;
-use opensrv_mysql::StatementMetaWriter;
+use opensrv_mysql::{
+ AsyncMysqlShim, ErrorKind, ParamParser, QueryResultWriter, StatementMetaWriter,
+};
use rand::RngCore;
use tokio::sync::RwLock;
diff --git a/src/servers/src/opentsdb/codec.rs b/src/servers/src/opentsdb/codec.rs
index cd7f2ea2aa9a..7871f7e05798 100644
--- a/src/servers/src/opentsdb/codec.rs
+++ b/src/servers/src/opentsdb/codec.rs
@@ -1,7 +1,8 @@
use std::collections::HashMap;
use api::v1::codec::InsertBatch;
-use api::v1::{column, column::SemanticType, insert_expr, Column, ColumnDataType, InsertExpr};
+use api::v1::column::SemanticType;
+use api::v1::{column, insert_expr, Column, ColumnDataType, InsertExpr};
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_grpc::writer::Precision;
use table::requests::InsertRequest;
@@ -206,7 +207,8 @@ impl DataPoint {
mod test {
use std::sync::Arc;
- use common_time::{timestamp::TimeUnit, Timestamp};
+ use common_time::timestamp::TimeUnit;
+ use common_time::Timestamp;
use datatypes::value::Value;
use datatypes::vectors::Vector;
diff --git a/src/servers/src/postgres/auth_handler.rs b/src/servers/src/postgres/auth_handler.rs
index ab94e4eb4fb1..6e279d5911db 100644
--- a/src/servers/src/postgres/auth_handler.rs
+++ b/src/servers/src/postgres/auth_handler.rs
@@ -5,8 +5,7 @@ use async_trait::async_trait;
use futures::{Sink, SinkExt};
use pgwire::api::auth::{ServerParameterProvider, StartupHandler};
use pgwire::api::{auth, ClientInfo, PgWireConnectionState};
-use pgwire::error::ErrorInfo;
-use pgwire::error::{PgWireError, PgWireResult};
+use pgwire::error::{ErrorInfo, PgWireError, PgWireResult};
use pgwire::messages::response::ErrorResponse;
use pgwire::messages::startup::Authentication;
use pgwire::messages::{PgWireBackendMessage, PgWireFrontendMessage};
diff --git a/src/servers/src/prometheus.rs b/src/servers/src/prometheus.rs
index 4dd642dfb507..485201869249 100644
--- a/src/servers/src/prometheus.rs
+++ b/src/servers/src/prometheus.rs
@@ -3,14 +3,11 @@ use std::cmp::Ordering;
use std::collections::{BTreeMap, HashMap};
use std::hash::{Hash, Hasher};
-use api::prometheus::remote::{
- label_matcher::Type as MatcherType, Label, Query, Sample, TimeSeries, WriteRequest,
-};
-use api::v1::codec::InsertBatch;
-use api::v1::{
- codec::SelectResult, column, column::SemanticType, insert_expr, Column, ColumnDataType,
- InsertExpr,
-};
+use api::prometheus::remote::label_matcher::Type as MatcherType;
+use api::prometheus::remote::{Label, Query, Sample, TimeSeries, WriteRequest};
+use api::v1::codec::{InsertBatch, SelectResult};
+use api::v1::column::SemanticType;
+use api::v1::{column, insert_expr, Column, ColumnDataType, InsertExpr};
use common_grpc::writer::Precision::MILLISECOND;
use openmetrics_parser::{MetricsExposition, PrometheusType, PrometheusValue};
use snafu::{OptionExt, ResultExt};
@@ -507,7 +504,8 @@ mod tests {
use api::prometheus::remote::LabelMatcher;
use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
- use datatypes::{value::Value, vectors::Vector};
+ use datatypes::value::Value;
+ use datatypes::vectors::Vector;
use super::*;
diff --git a/src/servers/src/server.rs b/src/servers/src/server.rs
index a8f7bfefcbf4..d4927a1e7303 100644
--- a/src/servers/src/server.rs
+++ b/src/servers/src/server.rs
@@ -4,8 +4,7 @@ use std::sync::Arc;
use async_trait::async_trait;
use common_runtime::Runtime;
use common_telemetry::logging::{error, info};
-use futures::future::AbortRegistration;
-use futures::future::{AbortHandle, Abortable};
+use futures::future::{AbortHandle, AbortRegistration, Abortable};
use snafu::ResultExt;
use tokio::sync::Mutex;
use tokio::task::JoinHandle;
diff --git a/src/servers/tests/http/http_handler_test.rs b/src/servers/tests/http/http_handler_test.rs
index 6b68f8318c3b..1420967184cb 100644
--- a/src/servers/tests/http/http_handler_test.rs
+++ b/src/servers/tests/http/http_handler_test.rs
@@ -4,9 +4,7 @@ use axum::body::Body;
use axum::extract::{Json, Query, RawBody, State};
use common_telemetry::metric;
use metrics::counter;
-use servers::http::handler as http_handler;
-use servers::http::script as script_handler;
-use servers::http::{ApiState, JsonOutput};
+use servers::http::{handler as http_handler, script as script_handler, ApiState, JsonOutput};
use table::test_util::MemTable;
use crate::{create_testing_script_handler, create_testing_sql_query_handler};
diff --git a/src/servers/tests/http/prometheus_test.rs b/src/servers/tests/http/prometheus_test.rs
index 0ec703a3bb29..05c26623dbc3 100644
--- a/src/servers/tests/http/prometheus_test.rs
+++ b/src/servers/tests/http/prometheus_test.rs
@@ -11,8 +11,7 @@ use prost::Message;
use servers::error::Result;
use servers::http::HttpServer;
use servers::prometheus;
-use servers::prometheus::snappy_compress;
-use servers::prometheus::Metrics;
+use servers::prometheus::{snappy_compress, Metrics};
use servers::query_handler::{PrometheusProtocolHandler, PrometheusResponse, SqlQueryHandler};
use tokio::sync::mpsc;
diff --git a/src/servers/tests/mod.rs b/src/servers/tests/mod.rs
index 11a0d7f65cba..4b27848bee7f 100644
--- a/src/servers/tests/mod.rs
+++ b/src/servers/tests/mod.rs
@@ -8,16 +8,15 @@ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_query::Output;
use query::{QueryEngineFactory, QueryEngineRef};
use servers::error::Result;
-use servers::query_handler::ScriptHandlerRef;
-use servers::query_handler::{ScriptHandler, SqlQueryHandler, SqlQueryHandlerRef};
+use servers::query_handler::{
+ ScriptHandler, ScriptHandlerRef, SqlQueryHandler, SqlQueryHandlerRef,
+};
use table::test_util::MemTable;
mod http;
mod mysql;
-use script::{
- engine::{CompileContext, EvalContext, Script, ScriptEngine},
- python::{PyEngine, PyScript},
-};
+use script::engine::{CompileContext, EvalContext, Script, ScriptEngine};
+use script::python::{PyEngine, PyScript};
mod opentsdb;
mod postgres;
diff --git a/src/servers/tests/mysql/mod.rs b/src/servers/tests/mysql/mod.rs
index ddaa407dede0..7f69ec38d0a7 100644
--- a/src/servers/tests/mysql/mod.rs
+++ b/src/servers/tests/mysql/mod.rs
@@ -8,8 +8,7 @@ use datatypes::vectors::{
UInt8Vector,
};
use mysql_async::prelude::FromRow;
-use mysql_async::FromRowError;
-use mysql_async::Value as MysqlValue;
+use mysql_async::{FromRowError, Value as MysqlValue};
use opensrv_mysql::ColumnType;
mod mysql_server_test;
diff --git a/src/servers/tests/opentsdb.rs b/src/servers/tests/opentsdb.rs
index 7a4fffb8e4d2..3caed762d0de 100644
--- a/src/servers/tests/opentsdb.rs
+++ b/src/servers/tests/opentsdb.rs
@@ -14,8 +14,7 @@ use servers::opentsdb::OpentsdbServer;
use servers::query_handler::OpentsdbProtocolHandler;
use servers::server::Server;
use tokio::net::TcpStream;
-use tokio::sync::mpsc;
-use tokio::sync::Notify;
+use tokio::sync::{mpsc, Notify};
struct DummyOpentsdbInstance {
tx: mpsc::Sender<i32>,
diff --git a/src/sql/src/parser.rs b/src/sql/src/parser.rs
index 209c1a271934..5eceb83e4a7f 100644
--- a/src/sql/src/parser.rs
+++ b/src/sql/src/parser.rs
@@ -1,8 +1,7 @@
use snafu::{ensure, ResultExt};
use sqlparser::dialect::Dialect;
use sqlparser::keywords::Keyword;
-use sqlparser::parser::Parser;
-use sqlparser::parser::ParserError;
+use sqlparser::parser::{Parser, ParserError};
use sqlparser::tokenizer::{Token, Tokenizer};
use crate::error::{
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index 41df8174cd6f..e99bc52ba391 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -2,8 +2,7 @@ use std::cmp::Ordering;
use itertools::Itertools;
use once_cell::sync::Lazy;
-use snafu::ResultExt;
-use snafu::{ensure, OptionExt};
+use snafu::{ensure, OptionExt, ResultExt};
use sqlparser::ast::Value;
use sqlparser::dialect::keywords::Keyword;
use sqlparser::parser::IsOptional::Mandatory;
diff --git a/src/storage/benches/memtable/bench_memtable_read.rs b/src/storage/benches/memtable/bench_memtable_read.rs
index aaa0623fdb19..9a75224534d1 100644
--- a/src/storage/benches/memtable/bench_memtable_read.rs
+++ b/src/storage/benches/memtable/bench_memtable_read.rs
@@ -1,6 +1,7 @@
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
-use crate::memtable::{generate_kvs, util::bench_context::BenchContext};
+use crate::memtable::generate_kvs;
+use crate::memtable::util::bench_context::BenchContext;
fn bench_memtable_read(c: &mut Criterion) {
// the length of string in value is 20
diff --git a/src/storage/benches/memtable/bench_memtable_read_write_ratio.rs b/src/storage/benches/memtable/bench_memtable_read_write_ratio.rs
index bb11662db498..417e62e1711f 100644
--- a/src/storage/benches/memtable/bench_memtable_read_write_ratio.rs
+++ b/src/storage/benches/memtable/bench_memtable_read_write_ratio.rs
@@ -1,11 +1,7 @@
-use std::{
- sync::{
- atomic::{AtomicBool, AtomicUsize, Ordering},
- Arc,
- },
- thread,
- time::Instant,
-};
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::sync::Arc;
+use std::thread;
+use std::time::Instant;
use atomic_float::AtomicF64;
use criterion::{
@@ -13,7 +9,8 @@ use criterion::{
};
use rand::Rng;
-use crate::memtable::{generate_kvs, util::bench_context::BenchContext};
+use crate::memtable::generate_kvs;
+use crate::memtable::util::bench_context::BenchContext;
static READ_NUM: AtomicUsize = AtomicUsize::new(0);
static WRITE_NUM: AtomicUsize = AtomicUsize::new(0);
diff --git a/src/storage/benches/memtable/mod.rs b/src/storage/benches/memtable/mod.rs
index 1b0f56914e6b..1582a76baaee 100644
--- a/src/storage/benches/memtable/mod.rs
+++ b/src/storage/benches/memtable/mod.rs
@@ -3,17 +3,15 @@ pub mod bench_memtable_read_write_ratio;
pub mod bench_memtable_write;
pub mod util;
-use std::sync::{
- atomic::{AtomicU64, Ordering},
- Arc,
-};
+use std::sync::atomic::{AtomicU64, Ordering};
+use std::sync::Arc;
use common_time::Timestamp;
-use datatypes::{
- prelude::ScalarVectorBuilder,
- vectors::{StringVectorBuilder, TimestampVectorBuilder, UInt64VectorBuilder},
-};
-use rand::{distributions::Alphanumeric, prelude::ThreadRng, Rng};
+use datatypes::prelude::ScalarVectorBuilder;
+use datatypes::vectors::{StringVectorBuilder, TimestampVectorBuilder, UInt64VectorBuilder};
+use rand::distributions::Alphanumeric;
+use rand::prelude::ThreadRng;
+use rand::Rng;
use storage::memtable::KeyValues;
use store_api::storage::{OpType, SequenceNumber};
diff --git a/src/storage/benches/memtable/util/mod.rs b/src/storage/benches/memtable/util/mod.rs
index bbe7f9a0d019..410a935ee863 100644
--- a/src/storage/benches/memtable/util/mod.rs
+++ b/src/storage/benches/memtable/util/mod.rs
@@ -3,11 +3,9 @@ pub mod regiondesc_util;
pub mod schema_util;
use datatypes::type_id::LogicalTypeId;
-use storage::{
- memtable::{DefaultMemtableBuilder, MemtableBuilder, MemtableRef},
- metadata::RegionMetadata,
- schema::RegionSchemaRef,
-};
+use storage::memtable::{DefaultMemtableBuilder, MemtableBuilder, MemtableRef};
+use storage::metadata::RegionMetadata;
+use storage::schema::RegionSchemaRef;
use crate::memtable::util::regiondesc_util::RegionDescBuilder;
diff --git a/src/storage/benches/memtable/util/regiondesc_util.rs b/src/storage/benches/memtable/util/regiondesc_util.rs
index db88f76ad71b..82ace380127c 100644
--- a/src/storage/benches/memtable/util/regiondesc_util.rs
+++ b/src/storage/benches/memtable/util/regiondesc_util.rs
@@ -4,7 +4,8 @@ use store_api::storage::{
RegionDescriptor, RowKeyDescriptorBuilder,
};
-use super::{schema_util::ColumnDef, TIMESTAMP_NAME};
+use super::schema_util::ColumnDef;
+use super::TIMESTAMP_NAME;
pub struct RegionDescBuilder {
name: String,
diff --git a/src/storage/benches/wal/util/mod.rs b/src/storage/benches/wal/util/mod.rs
index ebff52cceb77..5efa5cc64d46 100644
--- a/src/storage/benches/wal/util/mod.rs
+++ b/src/storage/benches/wal/util/mod.rs
@@ -2,16 +2,14 @@ pub mod write_batch_util;
use std::sync::Arc;
-use datatypes::{
- prelude::ScalarVector,
- type_id::LogicalTypeId,
- vectors::{BooleanVector, Float64Vector, StringVector, TimestampVector, UInt64Vector},
+use datatypes::prelude::ScalarVector;
+use datatypes::type_id::LogicalTypeId;
+use datatypes::vectors::{
+ BooleanVector, Float64Vector, StringVector, TimestampVector, UInt64Vector,
};
use rand::Rng;
-use storage::{
- proto,
- write_batch::{PutData, WriteBatch},
-};
+use storage::proto;
+use storage::write_batch::{PutData, WriteBatch};
use store_api::storage::{consts, PutOperation, WriteRequest};
pub fn new_test_batch() -> WriteBatch {
diff --git a/src/storage/src/engine.rs b/src/storage/src/engine.rs
index 016ade9d2273..6863d134206e 100644
--- a/src/storage/src/engine.rs
+++ b/src/storage/src/engine.rs
@@ -5,9 +5,9 @@ use async_trait::async_trait;
use common_telemetry::logging::info;
use object_store::{util, ObjectStore};
use snafu::ResultExt;
-use store_api::{
- logstore::LogStore,
- storage::{CreateOptions, EngineContext, OpenOptions, RegionDescriptor, StorageEngine},
+use store_api::logstore::LogStore;
+use store_api::storage::{
+ CreateOptions, EngineContext, OpenOptions, RegionDescriptor, StorageEngine,
};
use crate::background::JobPoolImpl;
diff --git a/src/storage/src/flush.rs b/src/storage/src/flush.rs
index 06f2da090c47..fef253bf522d 100644
--- a/src/storage/src/flush.rs
+++ b/src/storage/src/flush.rs
@@ -12,8 +12,7 @@ use crate::error::{CancelledSnafu, Result};
use crate::manifest::action::*;
use crate::manifest::region::RegionManifest;
use crate::memtable::{IterContext, MemtableId, MemtableRef};
-use crate::region::RegionWriterRef;
-use crate::region::SharedDataRef;
+use crate::region::{RegionWriterRef, SharedDataRef};
use crate::sst::{AccessLayerRef, FileMeta, WriteOptions};
use crate::wal::Wal;
diff --git a/src/storage/src/manifest/action.rs b/src/storage/src/manifest/action.rs
index 2b8a33ccef0b..b28d195b05a1 100644
--- a/src/storage/src/manifest/action.rs
+++ b/src/storage/src/manifest/action.rs
@@ -4,10 +4,8 @@ use serde::{Deserialize, Serialize};
use serde_json as json;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::manifest::action::{ProtocolAction, ProtocolVersion, VersionHeader};
-use store_api::manifest::ManifestVersion;
-use store_api::manifest::MetaAction;
-use store_api::storage::RegionId;
-use store_api::storage::SequenceNumber;
+use store_api::manifest::{ManifestVersion, MetaAction};
+use store_api::storage::{RegionId, SequenceNumber};
use crate::error::{
self, DecodeJsonSnafu, DecodeMetaActionListSnafu, ManifestProtocolForbidReadSnafu,
diff --git a/src/storage/src/manifest/impl_.rs b/src/storage/src/manifest/impl_.rs
index 101fd2d911cf..6f229fa47314 100644
--- a/src/storage/src/manifest/impl_.rs
+++ b/src/storage/src/manifest/impl_.rs
@@ -1,8 +1,6 @@
use std::marker::PhantomData;
-use std::sync::{
- atomic::{AtomicU64, Ordering},
- Arc,
-};
+use std::sync::atomic::{AtomicU64, Ordering};
+use std::sync::Arc;
use arc_swap::ArcSwap;
use async_trait::async_trait;
@@ -13,8 +11,7 @@ use store_api::manifest::action::{self, ProtocolAction, ProtocolVersion};
use store_api::manifest::*;
use crate::error::{Error, ManifestProtocolForbidWriteSnafu, Result};
-use crate::manifest::storage::ManifestObjectStore;
-use crate::manifest::storage::ObjectStoreLogIterator;
+use crate::manifest::storage::{ManifestObjectStore, ObjectStoreLogIterator};
#[derive(Clone, Debug)]
pub struct ManifestImpl<M: MetaAction<Error = Error>> {
diff --git a/src/storage/src/manifest/region.rs b/src/storage/src/manifest/region.rs
index df04aaf404eb..ce3763dfb27e 100644
--- a/src/storage/src/manifest/region.rs
+++ b/src/storage/src/manifest/region.rs
@@ -8,7 +8,8 @@ pub type RegionManifest = ManifestImpl<RegionMetaActionList>;
mod tests {
use std::sync::Arc;
- use object_store::{backend::fs, ObjectStore};
+ use object_store::backend::fs;
+ use object_store::ObjectStore;
use store_api::manifest::action::ProtocolAction;
use store_api::manifest::{Manifest, MetaActionIterator, MAX_VERSION};
use tempdir::TempDir;
diff --git a/src/storage/src/manifest/storage.rs b/src/storage/src/manifest/storage.rs
index c651d1c947eb..06abcf6c058e 100644
--- a/src/storage/src/manifest/storage.rs
+++ b/src/storage/src/manifest/storage.rs
@@ -264,7 +264,8 @@ impl ManifestLogStorage for ManifestObjectStore {
#[cfg(test)]
mod tests {
- use object_store::{backend::fs, ObjectStore};
+ use object_store::backend::fs;
+ use object_store::ObjectStore;
use tempdir::TempDir;
use super::*;
diff --git a/src/storage/src/memtable/btree.rs b/src/storage/src/memtable/btree.rs
index f6b6e9b603e4..4fe96d28b9fc 100644
--- a/src/storage/src/memtable/btree.rs
+++ b/src/storage/src/memtable/btree.rs
@@ -1,10 +1,8 @@
use std::cmp::Ordering;
use std::collections::{btree_map, BTreeMap};
use std::ops::Bound;
-use std::sync::{
- atomic::{AtomicUsize, Ordering as AtomicOrdering},
- Arc, RwLock,
-};
+use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering};
+use std::sync::{Arc, RwLock};
use datatypes::prelude::*;
use datatypes::value::Value;
diff --git a/src/storage/src/memtable/inserter.rs b/src/storage/src/memtable/inserter.rs
index d63a7c9a3789..64d6f9e8dda0 100644
--- a/src/storage/src/memtable/inserter.rs
+++ b/src/storage/src/memtable/inserter.rs
@@ -124,8 +124,9 @@ mod tests {
use std::sync::Arc;
use common_time::timestamp::Timestamp;
+ use datatypes::type_id::LogicalTypeId;
+ use datatypes::value::Value;
use datatypes::vectors::{Int64Vector, TimestampVector};
- use datatypes::{type_id::LogicalTypeId, value::Value};
use store_api::storage::{PutOperation, WriteRequest};
use super::*;
diff --git a/src/storage/src/memtable/version.rs b/src/storage/src/memtable/version.rs
index 9406f3869bbe..f6f764f64db2 100644
--- a/src/storage/src/memtable/version.rs
+++ b/src/storage/src/memtable/version.rs
@@ -109,8 +109,7 @@ mod tests {
use std::sync::Arc;
use super::*;
- use crate::memtable::DefaultMemtableBuilder;
- use crate::memtable::MemtableBuilder;
+ use crate::memtable::{DefaultMemtableBuilder, MemtableBuilder};
use crate::test_util::schema_util;
#[test]
diff --git a/src/storage/src/metadata.rs b/src/storage/src/metadata.rs
index eca8314fc62c..580327b6d2fe 100644
--- a/src/storage/src/metadata.rs
+++ b/src/storage/src/metadata.rs
@@ -8,8 +8,8 @@ use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Metadata};
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt};
+use store_api::storage::consts::{self, ReservedColumnId};
use store_api::storage::{
- consts::{self, ReservedColumnId},
AddColumn, AlterOperation, AlterRequest, ColumnDescriptor, ColumnDescriptorBuilder,
ColumnDescriptorBuilderError, ColumnFamilyDescriptor, ColumnFamilyDescriptorBuilder,
ColumnFamilyId, ColumnId, RegionDescriptor, RegionDescriptorBuilder, RegionId, RegionMeta,
diff --git a/src/storage/src/proto/write_batch.rs b/src/storage/src/proto/write_batch.rs
index 7289750cf73b..a0885be45cd1 100644
--- a/src/storage/src/proto/write_batch.rs
+++ b/src/storage/src/proto/write_batch.rs
@@ -5,19 +5,16 @@ use std::sync::Arc;
use common_base::BitVec;
use common_error::prelude::*;
+use datatypes::data_type::ConcreteDataType;
+use datatypes::prelude::{ScalarVector, ScalarVectorBuilder};
use datatypes::schema;
-use datatypes::{
- data_type::ConcreteDataType,
- prelude::{ScalarVector, ScalarVectorBuilder},
- vectors::{
- BinaryVector, BinaryVectorBuilder, BooleanVector, BooleanVectorBuilder, Float32Vector,
- Float32VectorBuilder, Float64Vector, Float64VectorBuilder, Int16Vector, Int16VectorBuilder,
- Int32Vector, Int32VectorBuilder, Int64Vector, Int64VectorBuilder, Int8Vector,
- Int8VectorBuilder, StringVector, StringVectorBuilder, TimestampVector,
- TimestampVectorBuilder, UInt16Vector, UInt16VectorBuilder, UInt32Vector,
- UInt32VectorBuilder, UInt64Vector, UInt64VectorBuilder, UInt8Vector, UInt8VectorBuilder,
- Vector, VectorRef,
- },
+use datatypes::vectors::{
+ BinaryVector, BinaryVectorBuilder, BooleanVector, BooleanVectorBuilder, Float32Vector,
+ Float32VectorBuilder, Float64Vector, Float64VectorBuilder, Int16Vector, Int16VectorBuilder,
+ Int32Vector, Int32VectorBuilder, Int64Vector, Int64VectorBuilder, Int8Vector,
+ Int8VectorBuilder, StringVector, StringVectorBuilder, TimestampVector, TimestampVectorBuilder,
+ UInt16Vector, UInt16VectorBuilder, UInt32Vector, UInt32VectorBuilder, UInt64Vector,
+ UInt64VectorBuilder, UInt8Vector, UInt8VectorBuilder, Vector, VectorRef,
};
use paste::paste;
use snafu::OptionExt;
diff --git a/src/storage/src/region.rs b/src/storage/src/region.rs
index 1569136b80ad..1ee1364f5016 100644
--- a/src/storage/src/region.rs
+++ b/src/storage/src/region.rs
@@ -16,18 +16,19 @@ use store_api::storage::{
use crate::error::{self, Error, Result};
use crate::flush::{FlushSchedulerRef, FlushStrategyRef};
-use crate::manifest::{
- action::{RawRegionMetadata, RegionChange, RegionMetaAction, RegionMetaActionList},
- region::RegionManifest,
+use crate::manifest::action::{
+ RawRegionMetadata, RegionChange, RegionMetaAction, RegionMetaActionList,
};
+use crate::manifest::region::RegionManifest;
use crate::memtable::MemtableBuilderRef;
use crate::metadata::{RegionMetaImpl, RegionMetadata, RegionMetadataRef};
pub use crate::region::writer::{AlterContext, RegionWriter, RegionWriterRef, WriterContext};
use crate::schema::compat::CompatWrite;
use crate::snapshot::SnapshotImpl;
use crate::sst::AccessLayerRef;
-use crate::version::VersionEdit;
-use crate::version::{Version, VersionControl, VersionControlRef, INIT_COMMITTED_SEQUENCE};
+use crate::version::{
+ Version, VersionControl, VersionControlRef, VersionEdit, INIT_COMMITTED_SEQUENCE,
+};
use crate::wal::Wal;
use crate::write_batch::WriteBatch;
diff --git a/src/storage/src/region/tests.rs b/src/storage/src/region/tests.rs
index 67aec5fa32f0..c76c5d3ad215 100644
--- a/src/storage/src/region/tests.rs
+++ b/src/storage/src/region/tests.rs
@@ -10,8 +10,10 @@ use common_time::timestamp::Timestamp;
use datatypes::prelude::ScalarVector;
use datatypes::type_id::LogicalTypeId;
use datatypes::vectors::{Int64Vector, TimestampVector};
-use log_store::fs::{log::LocalFileLogStore, noop::NoopLogStore};
-use object_store::{backend::fs, ObjectStore};
+use log_store::fs::log::LocalFileLogStore;
+use log_store::fs::noop::NoopLogStore;
+use object_store::backend::fs;
+use object_store::ObjectStore;
use store_api::storage::{
consts, Chunk, ChunkReader, PutOperation, ScanRequest, SequenceNumber, Snapshot, WriteRequest,
};
@@ -21,9 +23,8 @@ use super::*;
use crate::manifest::action::{RegionChange, RegionMetaActionList};
use crate::manifest::test_utils::*;
use crate::memtable::DefaultMemtableBuilder;
-use crate::test_util::{
- self, config_util, descriptor_util::RegionDescBuilder, schema_util, write_batch_util,
-};
+use crate::test_util::descriptor_util::RegionDescBuilder;
+use crate::test_util::{self, config_util, schema_util, write_batch_util};
use crate::write_batch::PutData;
/// Create metadata of a region with schema: (timestamp, v0).
diff --git a/src/storage/src/region/tests/alter.rs b/src/storage/src/region/tests/alter.rs
index 72d213ec4380..5cff8fba3914 100644
--- a/src/storage/src/region/tests/alter.rs
+++ b/src/storage/src/region/tests/alter.rs
@@ -5,19 +5,15 @@ use common_time::Timestamp;
use datatypes::prelude::*;
use datatypes::vectors::{Int64Vector, TimestampVector};
use log_store::fs::log::LocalFileLogStore;
-use store_api::storage::PutOperation;
-use store_api::storage::WriteRequest;
use store_api::storage::{
AddColumn, AlterOperation, AlterRequest, Chunk, ChunkReader, ColumnDescriptor,
- ColumnDescriptorBuilder, ColumnId, Region, RegionMeta, ScanRequest, SchemaRef, Snapshot,
- WriteResponse,
+ ColumnDescriptorBuilder, ColumnId, PutOperation, Region, RegionMeta, ScanRequest, SchemaRef,
+ Snapshot, WriteRequest, WriteResponse,
};
use tempdir::TempDir;
use crate::region::tests::{self, FileTesterBase};
-use crate::region::OpenOptions;
-use crate::region::RegionImpl;
-use crate::region::{RawRegionMetadata, RegionMetadata};
+use crate::region::{OpenOptions, RawRegionMetadata, RegionImpl, RegionMetadata};
use crate::test_util;
use crate::test_util::config_util;
use crate::test_util::descriptor_util::RegionDescBuilder;
diff --git a/src/storage/src/region/tests/projection.rs b/src/storage/src/region/tests/projection.rs
index 04a09184638d..72a5d0e2c585 100644
--- a/src/storage/src/region/tests/projection.rs
+++ b/src/storage/src/region/tests/projection.rs
@@ -12,8 +12,7 @@ use store_api::storage::{
};
use tempdir::TempDir;
-use crate::region::RegionImpl;
-use crate::region::RegionMetadata;
+use crate::region::{RegionImpl, RegionMetadata};
use crate::test_util::{self, config_util, descriptor_util, write_batch_util};
use crate::write_batch::{PutData, WriteBatch};
diff --git a/src/storage/src/region/writer.rs b/src/storage/src/region/writer.rs
index 8b462e788bba..34accb1401b4 100644
--- a/src/storage/src/region/writer.rs
+++ b/src/storage/src/region/writer.rs
@@ -5,8 +5,7 @@ use futures::TryStreamExt;
use snafu::ResultExt;
use store_api::logstore::LogStore;
use store_api::manifest::{Manifest, ManifestVersion, MetaAction};
-use store_api::storage::SequenceNumber;
-use store_api::storage::{AlterRequest, WriteContext, WriteResponse};
+use store_api::storage::{AlterRequest, SequenceNumber, WriteContext, WriteResponse};
use tokio::sync::Mutex;
use crate::background::JobHandle;
@@ -21,8 +20,7 @@ use crate::proto::wal::WalHeader;
use crate::region::{RecoverdMetadata, RecoveredMetadataMap, RegionManifest, SharedDataRef};
use crate::schema::compat::CompatWrite;
use crate::sst::AccessLayerRef;
-use crate::version::VersionControl;
-use crate::version::{VersionControlRef, VersionEdit};
+use crate::version::{VersionControl, VersionControlRef, VersionEdit};
use crate::wal::{Payload, Wal};
use crate::write_batch::WriteBatch;
diff --git a/src/storage/src/schema/compat.rs b/src/storage/src/schema/compat.rs
index f87b83dd768d..4fcd9210dd27 100644
--- a/src/storage/src/schema/compat.rs
+++ b/src/storage/src/schema/compat.rs
@@ -310,16 +310,13 @@ impl ReadAdapter {
#[cfg(test)]
mod tests {
use datatypes::data_type::ConcreteDataType;
- use store_api::storage::consts;
- use store_api::storage::ColumnDescriptorBuilder;
+ use store_api::storage::{consts, ColumnDescriptorBuilder};
use super::*;
use crate::error::Error;
use crate::metadata::RegionMetadata;
- use crate::schema::tests;
- use crate::schema::{ProjectedSchema, RegionSchema};
- use crate::test_util::descriptor_util;
- use crate::test_util::schema_util;
+ use crate::schema::{tests, ProjectedSchema, RegionSchema};
+ use crate::test_util::{descriptor_util, schema_util};
fn check_fields(fields: &[Field], names: &[&str]) {
for (field, name) in fields.iter().zip(names) {
diff --git a/src/storage/src/sst/parquet.rs b/src/storage/src/sst/parquet.rs
index fc65530681c3..f96b9129e551 100644
--- a/src/storage/src/sst/parquet.rs
+++ b/src/storage/src/sst/parquet.rs
@@ -296,8 +296,9 @@ mod tests {
use tempdir::TempDir;
use super::*;
- use crate::memtable::tests as memtable_tests;
- use crate::memtable::{DefaultMemtableBuilder, IterContext, MemtableBuilder};
+ use crate::memtable::{
+ tests as memtable_tests, DefaultMemtableBuilder, IterContext, MemtableBuilder,
+ };
#[tokio::test]
async fn test_parquet_writer() {
diff --git a/src/storage/src/test_util/config_util.rs b/src/storage/src/test_util/config_util.rs
index b72ff3fe7d3e..829c4873e863 100644
--- a/src/storage/src/test_util/config_util.rs
+++ b/src/storage/src/test_util/config_util.rs
@@ -1,7 +1,9 @@
use std::sync::Arc;
-use log_store::fs::{config::LogConfig, log::LocalFileLogStore};
-use object_store::{backend::fs::Builder, ObjectStore};
+use log_store::fs::config::LogConfig;
+use log_store::fs::log::LocalFileLogStore;
+use object_store::backend::fs::Builder;
+use object_store::ObjectStore;
use crate::background::JobPoolImpl;
use crate::engine;
diff --git a/src/storage/src/test_util/descriptor_util.rs b/src/storage/src/test_util/descriptor_util.rs
index a02712bd3d29..21f18826a85b 100644
--- a/src/storage/src/test_util/descriptor_util.rs
+++ b/src/storage/src/test_util/descriptor_util.rs
@@ -5,7 +5,8 @@ use store_api::storage::{
RegionDescriptor, RegionId, RowKeyDescriptorBuilder,
};
-use crate::test_util::{self, schema_util::ColumnDef};
+use crate::test_util::schema_util::ColumnDef;
+use crate::test_util::{self};
/// A RegionDescriptor builder for test.
pub struct RegionDescBuilder {
diff --git a/src/storage/src/version.rs b/src/storage/src/version.rs
index 76aedac913f6..bf42615e2357 100644
--- a/src/storage/src/version.rs
+++ b/src/storage/src/version.rs
@@ -16,8 +16,7 @@ use store_api::storage::{SchemaRef, SequenceNumber};
use crate::memtable::{MemtableId, MemtableRef, MemtableVersion};
use crate::metadata::RegionMetadataRef;
use crate::schema::RegionSchemaRef;
-use crate::sst::LevelMetas;
-use crate::sst::{FileHandle, FileMeta};
+use crate::sst::{FileHandle, FileMeta, LevelMetas};
use crate::sync::CowCell;
pub const INIT_COMMITTED_SEQUENCE: u64 = 0;
diff --git a/src/storage/src/wal.rs b/src/storage/src/wal.rs
index d48101a9f619..3a5ed29bdc70 100644
--- a/src/storage/src/wal.rs
+++ b/src/storage/src/wal.rs
@@ -5,24 +5,18 @@ use common_error::prelude::BoxedError;
use futures::{stream, Stream, TryStreamExt};
use prost::Message;
use snafu::{ensure, ResultExt};
-use store_api::storage::RegionId;
-use store_api::{
- logstore::{entry::Entry, AppendResponse, LogStore},
- storage::SequenceNumber,
-};
-
-use crate::{
- codec::{Decoder, Encoder},
- error::{self, Error, Result},
- proto::wal::{self, PayloadType, WalHeader},
- write_batch::{
- codec::{
- WriteBatchArrowDecoder, WriteBatchArrowEncoder, WriteBatchProtobufDecoder,
- WriteBatchProtobufEncoder,
- },
- WriteBatch,
- },
+use store_api::logstore::entry::Entry;
+use store_api::logstore::{AppendResponse, LogStore};
+use store_api::storage::{RegionId, SequenceNumber};
+
+use crate::codec::{Decoder, Encoder};
+use crate::error::{self, Error, Result};
+use crate::proto::wal::{self, PayloadType, WalHeader};
+use crate::write_batch::codec::{
+ WriteBatchArrowDecoder, WriteBatchArrowEncoder, WriteBatchProtobufDecoder,
+ WriteBatchProtobufEncoder,
};
+use crate::write_batch::WriteBatch;
#[derive(Debug)]
pub struct Wal<S: LogStore> {
diff --git a/src/storage/src/write_batch.rs b/src/storage/src/write_batch.rs
index 2e15dbabe530..87883d3f6ede 100644
--- a/src/storage/src/write_batch.rs
+++ b/src/storage/src/write_batch.rs
@@ -1,21 +1,18 @@
mod compat;
-use std::{
- any::Any,
- collections::{BTreeSet, HashMap},
- slice,
- time::Duration,
-};
+use std::any::Any;
+use std::collections::{BTreeSet, HashMap};
+use std::slice;
+use std::time::Duration;
use common_error::prelude::*;
use common_time::timestamp_millis::BucketAligned;
use common_time::RangeMillis;
+use datatypes::arrow::error::ArrowError;
+use datatypes::data_type::ConcreteDataType;
+use datatypes::prelude::{ScalarVector, Value};
use datatypes::schema::{ColumnSchema, SchemaRef};
-use datatypes::vectors::{Int64Vector, TimestampVector};
-use datatypes::{
- arrow::error::ArrowError, data_type::ConcreteDataType, prelude::ScalarVector, prelude::Value,
- vectors::VectorRef,
-};
+use datatypes::vectors::{Int64Vector, TimestampVector, VectorRef};
use prost::{DecodeError, EncodeError};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::{consts, PutOperation, WriteRequest};
@@ -491,34 +488,27 @@ impl<'a> IntoIterator for &'a WriteBatch {
pub mod codec {
- use std::{io::Cursor, sync::Arc};
-
- use datatypes::{
- arrow::{
- chunk::Chunk as ArrowChunk,
- io::ipc::{
- read::{self, StreamReader, StreamState},
- write::{StreamWriter, WriteOptions},
- },
- },
- schema::{Schema, SchemaRef},
- vectors::Helper,
- };
+ use std::io::Cursor;
+ use std::sync::Arc;
+
+ use datatypes::arrow::chunk::Chunk as ArrowChunk;
+ use datatypes::arrow::io::ipc::read::{self, StreamReader, StreamState};
+ use datatypes::arrow::io::ipc::write::{StreamWriter, WriteOptions};
+ use datatypes::schema::{Schema, SchemaRef};
+ use datatypes::vectors::Helper;
use prost::Message;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::WriteRequest;
use crate::codec::{Decoder, Encoder};
- use crate::proto::{
- wal::MutationType,
- write_batch::{self, gen_columns, gen_put_data_vector},
- };
+ use crate::proto::wal::MutationType;
+ use crate::proto::write_batch::{self, gen_columns, gen_put_data_vector};
use crate::write_batch::{
- DataCorruptedSnafu, DecodeArrowSnafu, DecodeVectorSnafu, EncodeArrowSnafu,
- Error as WriteBatchError, FromProtobufSnafu, MissingColumnSnafu, Mutation,
- ParseSchemaSnafu, Result, StreamWaitingSnafu, ToProtobufSnafu, WriteBatch,
+ DataCorruptedSnafu, DecodeArrowSnafu, DecodeProtobufSnafu, DecodeVectorSnafu,
+ EncodeArrowSnafu, EncodeProtobufSnafu, Error as WriteBatchError, FromProtobufSnafu,
+ MissingColumnSnafu, Mutation, ParseSchemaSnafu, PutData, Result, StreamWaitingSnafu,
+ ToProtobufSnafu, WriteBatch,
};
- use crate::write_batch::{DecodeProtobufSnafu, EncodeProtobufSnafu, PutData};
// TODO(jiachun): We can make a comparison with protobuf, including performance, storage cost,
// CPU consumption, etc
diff --git a/src/store-api/src/manifest.rs b/src/store-api/src/manifest.rs
index 3eaadc17b01f..1f1515f5aae9 100644
--- a/src/store-api/src/manifest.rs
+++ b/src/store-api/src/manifest.rs
@@ -4,10 +4,10 @@ mod storage;
use async_trait::async_trait;
use common_error::ext::ErrorExt;
-use serde::{de::DeserializeOwned, Serialize};
+use serde::de::DeserializeOwned;
+use serde::Serialize;
-use crate::manifest::action::ProtocolAction;
-use crate::manifest::action::ProtocolVersion;
+use crate::manifest::action::{ProtocolAction, ProtocolVersion};
pub use crate::manifest::storage::*;
pub type ManifestVersion = u64;
diff --git a/src/table-engine/src/engine.rs b/src/table-engine/src/engine.rs
index 8ebbbf2938dc..43e56616ba95 100644
--- a/src/table-engine/src/engine.rs
+++ b/src/table-engine/src/engine.rs
@@ -1,6 +1,5 @@
use std::collections::HashMap;
-use std::sync::Arc;
-use std::sync::RwLock;
+use std::sync::{Arc, RwLock};
use async_trait::async_trait;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
@@ -15,13 +14,10 @@ use store_api::storage::{
RegionId, RowKeyDescriptor, RowKeyDescriptorBuilder, StorageEngine,
};
use table::engine::{EngineContext, TableEngine, TableReference};
+use table::metadata::{TableId, TableInfoBuilder, TableMetaBuilder, TableType, TableVersion};
use table::requests::{AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest};
-use table::Result as TableResult;
-use table::{
- metadata::{TableId, TableInfoBuilder, TableMetaBuilder, TableType, TableVersion},
- table::TableRef,
- Table,
-};
+use table::table::TableRef;
+use table::{Result as TableResult, Table};
use tokio::sync::Mutex;
use crate::config::EngineConfig;
@@ -470,11 +466,9 @@ impl<S: StorageEngine> MitoEngineInner<S> {
mod tests {
use common_query::physical_plan::RuntimeEnv;
use common_recordbatch::util;
- use datafusion_common::field_util::FieldExt;
- use datafusion_common::field_util::SchemaExt;
+ use datafusion_common::field_util::{FieldExt, SchemaExt};
use datatypes::prelude::{ConcreteDataType, ScalarVector};
- use datatypes::schema::ColumnSchema;
- use datatypes::schema::{ColumnDefaultConstraint, SchemaBuilder};
+ use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, SchemaBuilder};
use datatypes::value::Value;
use datatypes::vectors::*;
use log_store::fs::noop::NoopLogStore;
diff --git a/src/table-engine/src/manifest/action.rs b/src/table-engine/src/manifest/action.rs
index ce35f6dea5eb..a0711aff9242 100644
--- a/src/table-engine/src/manifest/action.rs
+++ b/src/table-engine/src/manifest/action.rs
@@ -9,8 +9,7 @@ use storage::error::{
};
use storage::manifest::helper;
use store_api::manifest::action::{ProtocolAction, ProtocolVersion, VersionHeader};
-use store_api::manifest::ManifestVersion;
-use store_api::manifest::MetaAction;
+use store_api::manifest::{ManifestVersion, MetaAction};
use table::metadata::{RawTableInfo, TableIdent};
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
diff --git a/src/table-engine/src/table.rs b/src/table-engine/src/table.rs
index 0e1c9b2003e3..0d2ce8de1424 100644
--- a/src/table-engine/src/table.rs
+++ b/src/table-engine/src/table.rs
@@ -24,13 +24,12 @@ use store_api::storage::{
RegionMeta, ScanRequest, SchemaRef, Snapshot, WriteContext, WriteRequest,
};
use table::error::{Error as TableError, MissingColumnSnafu, Result as TableResult};
-use table::metadata::{FilterPushDownType, TableInfoRef};
+use table::metadata::{
+ FilterPushDownType, RawTableInfo, TableInfo, TableInfoRef, TableMeta, TableType,
+};
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, InsertRequest};
use table::table::scan::SimpleTableScan;
-use table::{
- metadata::{RawTableInfo, TableInfo, TableMeta, TableType},
- table::Table,
-};
+use table::table::Table;
use tokio::sync::Mutex;
use crate::error::{
diff --git a/src/table-engine/src/table/test_util.rs b/src/table-engine/src/table/test_util.rs
index fc95b1bad607..15f6c063463d 100644
--- a/src/table-engine/src/table/test_util.rs
+++ b/src/table-engine/src/table/test_util.rs
@@ -7,22 +7,19 @@ use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema, SchemaBuilder, SchemaRef};
use datatypes::vectors::VectorRef;
use log_store::fs::noop::NoopLogStore;
-use object_store::{services::fs::Builder, ObjectStore};
+use object_store::services::fs::Builder;
+use object_store::ObjectStore;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
-use table::engine::EngineContext;
-use table::engine::TableEngine;
+use table::engine::{EngineContext, TableEngine};
use table::metadata::{TableInfo, TableInfoBuilder, TableMetaBuilder, TableType};
-use table::requests::CreateTableRequest;
-use table::requests::InsertRequest;
+use table::requests::{CreateTableRequest, InsertRequest};
use table::TableRef;
use tempdir::TempDir;
use crate::config::EngineConfig;
-use crate::engine::MitoEngine;
-use crate::engine::MITO_ENGINE;
-pub use crate::table::test_util::mock_engine::MockEngine;
-pub use crate::table::test_util::mock_engine::MockRegion;
+use crate::engine::{MitoEngine, MITO_ENGINE};
+pub use crate::table::test_util::mock_engine::{MockEngine, MockRegion};
pub const TABLE_NAME: &str = "demo";
diff --git a/src/table/src/predicate.rs b/src/table/src/predicate.rs
index 9fcab030f3d5..058e5efbaf77 100644
--- a/src/table/src/predicate.rs
+++ b/src/table/src/predicate.rs
@@ -58,9 +58,7 @@ mod tests {
pub use datafusion::parquet::schema::types::{BasicTypeInfo, PhysicalType};
use datafusion_common::Column;
- use datafusion_expr::Expr;
- use datafusion_expr::Literal;
- use datafusion_expr::Operator;
+ use datafusion_expr::{Expr, Literal, Operator};
use datatypes::arrow::array::{Int32Array, Utf8Array};
use datatypes::arrow::chunk::Chunk;
use datatypes::arrow::datatypes::{DataType, Field, Schema};
@@ -68,8 +66,7 @@ mod tests {
use datatypes::arrow::io::parquet::write::{
Compression, Encoding, FileSink, Version, WriteOptions,
};
- use futures::AsyncWriteExt;
- use futures::SinkExt;
+ use futures::{AsyncWriteExt, SinkExt};
use tempdir::TempDir;
use tokio_util::compat::TokioAsyncWriteCompatExt;
diff --git a/src/table/src/table/adapter.rs b/src/table/src/table/adapter.rs
index 06b002f98ea4..d970f7a62d4b 100644
--- a/src/table/src/table/adapter.rs
+++ b/src/table/src/table/adapter.rs
@@ -6,10 +6,8 @@ use common_query::physical_plan::{DfPhysicalPlanAdapter, PhysicalPlanAdapter, Ph
use common_query::DfPhysicalPlan;
use common_telemetry::debug;
use datafusion::arrow::datatypes::SchemaRef as DfSchemaRef;
-use datafusion::datasource::{
- datasource::TableProviderFilterPushDown as DfTableProviderFilterPushDown, TableProvider,
- TableType as DfTableType,
-};
+use datafusion::datasource::datasource::TableProviderFilterPushDown as DfTableProviderFilterPushDown;
+use datafusion::datasource::{TableProvider, TableType as DfTableType};
use datafusion::error::Result as DfResult;
use datafusion::logical_plan::Expr as DfExpr;
use datatypes::schema::{SchemaRef as TableSchemaRef, SchemaRef};
diff --git a/src/table/src/table/scan.rs b/src/table/src/table/scan.rs
index 372e35c6775e..6f1fe0f7cbe9 100644
--- a/src/table/src/table/scan.rs
+++ b/src/table/src/table/scan.rs
@@ -5,9 +5,7 @@ use std::sync::{Arc, Mutex};
use async_trait::async_trait;
use common_query::error as query_error;
use common_query::error::Result as QueryResult;
-use common_query::physical_plan::Partitioning;
-use common_query::physical_plan::RuntimeEnv;
-use common_query::physical_plan::{PhysicalPlan, PhysicalPlanRef};
+use common_query::physical_plan::{Partitioning, PhysicalPlan, PhysicalPlanRef, RuntimeEnv};
use common_recordbatch::SendableRecordBatchStream;
use datatypes::schema::SchemaRef;
use snafu::OptionExt;
@@ -70,8 +68,7 @@ impl PhysicalPlan for SimpleTableScan {
#[cfg(test)]
mod test {
- use common_recordbatch::util;
- use common_recordbatch::{RecordBatch, RecordBatches};
+ use common_recordbatch::{util, RecordBatch, RecordBatches};
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::Int32Vector;
diff --git a/src/table/src/test_util/empty_table.rs b/src/table/src/test_util/empty_table.rs
index 7ef87eda36c7..eba10de20a6e 100644
--- a/src/table/src/test_util/empty_table.rs
+++ b/src/table/src/test_util/empty_table.rs
@@ -4,16 +4,10 @@ use async_trait::async_trait;
use common_query::physical_plan::PhysicalPlanRef;
use common_recordbatch::EmptyRecordBatchStream;
-use crate::metadata::TableInfoBuilder;
-use crate::metadata::TableInfoRef;
-use crate::requests::InsertRequest;
+use crate::metadata::{TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType};
+use crate::requests::{CreateTableRequest, InsertRequest};
use crate::table::scan::SimpleTableScan;
-use crate::Result;
-use crate::{
- metadata::{TableMetaBuilder, TableType},
- requests::CreateTableRequest,
- Table,
-};
+use crate::{Result, Table};
pub struct EmptyTable {
info: TableInfoRef,
diff --git a/src/table/src/test_util/mock_engine.rs b/src/table/src/test_util/mock_engine.rs
index 75c038f356e2..8142ccf0ac3c 100644
--- a/src/table/src/test_util/mock_engine.rs
+++ b/src/table/src/test_util/mock_engine.rs
@@ -4,12 +4,10 @@ use std::sync::Arc;
use async_trait::async_trait;
use tokio::sync::Mutex;
+use crate::engine::{EngineContext, TableEngine, TableReference};
+use crate::requests::{AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest};
use crate::test_util::EmptyTable;
-use crate::{
- engine::{EngineContext, TableEngine, TableReference},
- requests::{AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest},
- Result, TableRef,
-};
+use crate::{Result, TableRef};
#[derive(Default)]
pub struct MockTableEngine {
|
chore
|
specify import style in rustfmt (#460)
|
8f4ec536de6fd6b3748cc6da29dd17132250898b
|
2024-09-03 07:45:01
|
dennis zhuang
|
feat: grpc writing supports TTL hint (#4651)
| false
|
diff --git a/src/operator/src/insert.rs b/src/operator/src/insert.rs
index 90edee015705..018021f47100 100644
--- a/src/operator/src/insert.rs
+++ b/src/operator/src/insert.rs
@@ -45,7 +45,7 @@ use store_api::metric_engine_consts::{
};
use store_api::mito_engine_options::{APPEND_MODE_KEY, MERGE_MODE_KEY};
use store_api::storage::{RegionId, TableId};
-use table::requests::InsertRequest as TableInsertRequest;
+use table::requests::{InsertRequest as TableInsertRequest, TTL_KEY};
use table::table_reference::TableReference;
use table::TableRef;
@@ -650,7 +650,12 @@ impl Inserter {
create_type: AutoCreateTableType,
) -> Result<TableRef> {
let mut hint_options = vec![];
- let options: &[(&str, &str)] = match create_type {
+
+ if let Some(ttl) = ctx.extension(TTL_KEY) {
+ hint_options.push((TTL_KEY, ttl));
+ }
+
+ match create_type {
AutoCreateTableType::Logical(_) => unreachable!(),
AutoCreateTableType::Physical => {
if let Some(append_mode) = ctx.extension(APPEND_MODE_KEY) {
@@ -659,13 +664,18 @@ impl Inserter {
if let Some(merge_mode) = ctx.extension(MERGE_MODE_KEY) {
hint_options.push((MERGE_MODE_KEY, merge_mode));
}
- hint_options.as_slice()
}
// Set append_mode to true for log table.
// because log tables should keep rows with the same ts and tags.
- AutoCreateTableType::Log => &[(APPEND_MODE_KEY, "true")],
- AutoCreateTableType::LastNonNull => &[(MERGE_MODE_KEY, "last_non_null")],
- };
+ AutoCreateTableType::Log => {
+ hint_options.push((APPEND_MODE_KEY, "true"));
+ }
+ AutoCreateTableType::LastNonNull => {
+ hint_options.push((MERGE_MODE_KEY, "last_non_null"));
+ }
+ }
+ let options: &[(&str, &str)] = hint_options.as_slice();
+
self.create_table_with_options(req, ctx, statement_executor, options)
.await
}
|
feat
|
grpc writing supports TTL hint (#4651)
|
76f1a79f1b44f324cb5321b7e68349d0e2658ef6
|
2023-03-15 13:11:36
|
zyy17
|
ci: set 'continue-on-error' to false since the problem of compiling binary was resolved (#1182)
| false
|
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 1cf0b13285e2..2b33f1e3eae9 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -33,17 +33,17 @@ jobs:
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64
- continue-on-error: true
+ continue-on-error: false
opts: "-F pyo3_backend"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64
- continue-on-error: true
+ continue-on-error: false
opts: "-F pyo3_backend"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64
- continue-on-error: true
+ continue-on-error: false
opts: "-F pyo3_backend"
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}
|
ci
|
set 'continue-on-error' to false since the problem of compiling binary was resolved (#1182)
|
a3fa455f312e80031648d17be455af6e7d0617e1
|
2023-08-01 16:56:48
|
Ruihang Xia
|
docs: rfc of metric engine (#1925)
| false
|
diff --git a/docs/rfcs/2023-07-10-metric-engine.md b/docs/rfcs/2023-07-10-metric-engine.md
new file mode 100644
index 000000000000..6ebd14759f5c
--- /dev/null
+++ b/docs/rfcs/2023-07-10-metric-engine.md
@@ -0,0 +1,202 @@
+---
+Feature Name: metric-engine
+Tracking Issue: TBD
+Date: 2023-07-10
+Author: "Ruihang Xia <[email protected]>"
+---
+
+# Summary
+
+A new metric engine that can significantly enhance our ability to handle the tremendous number of small tables in scenarios like Prometheus metrics, by leveraging a synthetic wide table that offers storage and metadata multiplexing capabilities over the existing engine.
+
+# Motivation
+
+The concept "Table" in GreptimeDB is a bit "heavy" compared to other time-series storage systems like Prometheus or VictoriaMetrics. This brings disadvantages in performance, footprint, storage, and cost.
+
+# Details
+
+## Top level description
+
+- User Interface
+
+ This feature will add a new type of storage engine. It might be exposed as an option like `with ENGINE=mito`, or used through an internal interface such as automatically creating tables on Prometheus remote write. From the user's side, there is no difference from tables in the mito engine. All DDL like `CREATE` and `ALTER`, and DML like `SELECT`, should be supported.
+
+- Implementation Overlook
+
+ This new engine doesn't re-implement low-level components like file R/W, etc. It's a wrapper layer over the existing mito engine, with extra storage and metadata multiplexing capabilities. I.e., it exposes multiple tables based on one mito engine table, like this:
+ ``` plaintext
+ ┌───────────────┐ ┌───────────────┐ ┌───────────────┐
+ │ Metric Engine │ │ Metric Engine │ │ Metric Engine │
+ │ Table 1 │ │ Table 2 │ │ Table 3 │
+ └───────────────┘ └───────────────┘ └───────────────┘
+ ▲ ▲ ▲
+ │ │ │
+ └───────────────┼───────────────────┘
+ │
+ ┌─────────┴────────┐
+ │ Metric Region │
+ │ Engine │
+ │ ┌─────────────┤
+ │ │ Mito Region │
+ │ │ Engine │
+ └────▲─────────────┘
+ │
+ │
+ ┌─────┴───────────────┐
+ │ │
+ │ Mito Engine Table │
+ │ │
+ └─────────────────────┘
+ ```
+
+The following parts will describe these implementation details:
+ - How to route these metric region tables and how those tables are distributed
+ - How to maintain the schema and other metadata of the underlying mito engine table
+ - How to maintain the schema of metric engine tables
+ - How the query goes
+
+## Routing
+
+Before this change, the region route rule is based on a group of partition keys. The relation of a physical table to regions is one-to-many.
+
+``` rust
+ pub struct PartitionDef {
+ partition_columns: Vec<String>,
+ partition_bounds: Vec<PartitionBound>,
+ }
+```
+
+For metric engine tables, the key difference is that we split the concepts of "physical table" and "logical table". As the previous ASCII chart shows, multiple logical tables are based on one physical table. The relationship of logical tables to regions becomes many-to-many. Thus, we must include the (logical) table name in the partition rules.
+
+Considering that the partition/route interface is a generic map from a string array to a region id, all we need to do is insert the logical table name into the request:
+
+``` rust
+ fn route(request: Vec<String>) -> RegionId;
+```
+
+The next question is where to do this conversion. The basic idea is to dispatch different routing behaviors based on the engine type. Since the frontend has all the necessary information, it's a good place to do that, and the meta server can be left untouched. The essential change is to associate the engine type with the route rule.
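+
+A minimal sketch of that frontend-side dispatch, assuming a simplified in-memory route table keyed by string values (the helper names below are illustrative, not existing APIs):
+
+``` rust
+// Sketch only: `RegionId` and the route table are simplified stand-ins,
+// not the real GreptimeDB types.
+use std::collections::HashMap;
+
+type RegionId = u64;
+
+/// For a metric engine table, prepend the logical table name to the
+/// ordinary partition key values before routing.
+fn build_route_values(
+    is_metric_engine: bool,
+    logical_table: &str,
+    partition_values: Vec<String>,
+) -> Vec<String> {
+    let mut values = Vec::with_capacity(partition_values.len() + 1);
+    if is_metric_engine {
+        values.push(logical_table.to_string());
+    }
+    values.extend(partition_values);
+    values
+}
+
+fn route(route_table: &HashMap<Vec<String>, RegionId>, request: &[String]) -> Option<RegionId> {
+    route_table.get(request).copied()
+}
+```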
+
+## Physical Region Schema
+
+The idea of the "physical wide table" is to perform column-level multiplexing, i.e., to map all logical columns to physical columns by their names.
+
+```
+ ┌────────────┐ ┌────────────┐ ┌────────────┐
+ │ Table 1 │ │ Table 2 │ │ Table 3 │
+ ├───┬────┬───┤ ├───┬────┬───┤ ├───┬────┬───┤
+ │C1 │ C2 │ C3│ │C1 │ C3 │ C5├──────┐ │C2 │ C4 │ C6│
+ └─┬─┴──┬─┴─┬─┘ ┌────┴───┴──┬─┴───┘ │ └─┬─┴──┬─┴─┬─┘
+ │ │ │ │ │ │ │ │ │
+ │ │ │ │ └──────────┐ │ │ │ │
+ │ │ │ │ │ │ │ │ │
+ │ │ │ │ ┌─────────────────┐ │ │ │ │ │
+ │ │ │ │ │ Physical Table │ │ │ │ │ │
+ │ │ │ │ ├──┬──┬──┬──┬──┬──┘ │ │ │ │ │
+ └────x───x───┴─►│C1│C2│C3│C4│C5│C6◄─┼─x────x────x───┘
+ │ │ └──┘▲─┘▲─┴─▲└─▲└──┘ │ │ │ │
+ │ │ │ │ │ │ │ │ │ │
+ ├───x──────────┘ ├───x──x─────┘ │ │ │
+ │ │ │ │ │ │ │ │
+ │ └─────────────┘ │ └───────┘ │ │
+ │ │ │ │
+ └─────────────────────x───────────────┘ │
+ │ │
+ └────────────────────┘
+```
+
+This approach is very straightforward but has one problem: it breaks when two columns share the same name but have different semantic types (time index, tag or field) or data types, e.g., `CREATE TABLE t1 (c1 timestamp(3) TIME INDEX)` and `CREATE TABLE t2 (c1 STRING PRIMARY KEY)`.
+
+One possible workaround is to prefix each column name with its data type and semantic type, like `_STRING_PK_c1`. However, considering that the primary goal at present is to support data from monitoring metrics like Prometheus remote write, where data types are simple and limited, it's acceptable not to support this at first.
+
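+As a rough illustration of that naming scheme (the exact prefix format below is an assumption derived from the `_STRING_PK_c1` example, not a settled spec):
+
+``` rust
+// Sketch only: map a logical column to a physical column name by
+// prefixing its data type and semantic type.
+#[derive(Debug, Clone, Copy)]
+enum SemanticType {
+    TimeIndex,
+    Tag,
+    Field,
+}
+
+fn physical_column_name(data_type: &str, semantic_type: SemanticType, logical_name: &str) -> String {
+    let semantic = match semantic_type {
+        SemanticType::TimeIndex => "TS",
+        SemanticType::Tag => "PK",
+        SemanticType::Field => "FIELD",
+    };
+    format!("_{}_{}_{}", data_type.to_uppercase(), semantic, logical_name)
+}
+
+// physical_column_name("string", SemanticType::Tag, "c1") == "_STRING_PK_c1"
+```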
+
+The next point is changing the physical table's schema. This is only needed when creating a new logical table or altering an existing one. Typically, table creation and alteration are explicit: we only need to emit an add-column request to the underlying physical table when processing the logical table's DDL. GreptimeDB can create or alter tables automatically for some protocols, but the internal logic is the same.
+
+Also, for simplicity, we don't support shrinking the underlying table at first. This can be achieved later by introducing a dedicated mechanism on the physical columns.
+
+The frontend does not need to keep the physical table's schema.
+
+## Metadata of physical regions
+
+Metric engine regions need to store extra metadata, like the schemas of logical tables or the names of all logical tables. That information is relatively simple and can be stored as key-value pairs. For now, we have to use another physical mito region for metadata. This raises an issue with region scheduling: since we don't have the ability to perform affinity scheduling, the initial version will simply assume the data region and the metadata region are on the same instance. See "Alternatives - Other storage for physical region's metadata" for a possible future improvement.
+
+Here is the schema of the metadata region and how we would use it. The `CREATE TABLE` clause of the metadata region looks like the following. Note that it wouldn't actually be created via SQL.
+
+``` sql
+ CREATE TABLE metadata(
+ ts timestamp time index,
+ key string primary key,
+ value string
+ );
+```
+
+The `ts` field is just a placeholder -- it satisfies the constraint that a mito region must contain a time index field. It will always be `0`. The other two fields, `key` and `value`, will be used as a k-v storage. It contains two groups of keys (see the sketch after this list):
+ - `__table_<TABLE_NAME>` marks table existence. It doesn't have a value.
+ - `__column_<TABLE_NAME>_<COLUMN_NAME>` marks column existence; the value is the column's semantic type.
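+
+A minimal sketch of how those keys could be composed (function names are illustrative, not the actual metric engine API):
+
+``` rust
+// Sketch only: key layout follows the two groups described above.
+fn table_key(table_name: &str) -> String {
+    format!("__table_{table_name}")
+}
+
+fn column_key(table_name: &str, column_name: &str) -> String {
+    format!("__column_{table_name}_{column_name}")
+}
+
+// table_key("cpu_usage")          == "__table_cpu_usage"
+// column_key("cpu_usage", "host") == "__column_cpu_usage_host", value = semantic type
+```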
+
+## Physical region implementation
+
+This RFC proposes to add a new region implementation named "MetricRegion". As shown in the first chart, it wraps the existing mito region. This section describes the implementation details. First, here is a chart showing how the region hierarchy looks:
+
+```plaintext
+ ┌───────────────────────┐
+ │ Metric Region │
+ │ │
+ │ ┌────────┬──────────┤
+ │ │ Mito │ Mito │
+ │ │ Region │ Region │
+ │ │ for │ for │
+ │ │ Data │ Metadata │
+ └───┴────────┴──────────┘
+```
+
+All upper levels only see the Metric Region. E.g., the Meta Server schedules this region, and the Frontend routes requests to this Metric Region's id. To be scheduled (opened, closed, etc.), the Metric Region needs to implement its own procedures. Most of those procedures can simply be assembled from the underlying Mito Regions', but those related to data, like alter or drop, will have their own new logic.
+
+Another point is the region id. Since the region id is used widely, from the meta server to persisted state, it's better to keep its format unchanged. This means we can't use the same id for two regions; each needs its own. To achieve this, this RFC proposes a concept named "region id group": a group of region ids that are bound together for different purposes, like the two underlying regions here.
+
+This preserves the first (most significant) 8 bits of the `u32` region number for grouping. Each group has one main id (the first one) and other sub ids (the remaining non-zero ids). Components other than the region implementation itself aren't aware of the existence of the region id group; they only see the main id. The region implementation is responsible for managing and using the region id group.
+
+```plaintext
+63 31 23 0
+┌────────────────────────────────────┬──────────┬──────────────────┐
+│ Table Id(32) │ Group(8) │ Region Number(24)│
+└────────────────────────────────────┴──────────┴──────────────────┘
+ Region Id(32)
+```
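+
+A minimal sketch of the encode/decode arithmetic implied by this layout (constants and helper names are illustrative):
+
+``` rust
+// Sketch only: 32-bit table id, 8-bit group, 24-bit region number.
+type RegionId = u64;
+
+const GROUP_BITS: u32 = 8;
+const REGION_NUMBER_BITS: u32 = 24;
+
+fn make_region_id(table_id: u32, group: u8, region_number: u32) -> RegionId {
+    debug_assert!(region_number < (1u32 << REGION_NUMBER_BITS));
+    ((table_id as u64) << 32)
+        | ((group as u64) << REGION_NUMBER_BITS)
+        | ((region_number as u64) & ((1u64 << REGION_NUMBER_BITS) - 1))
+}
+
+/// The main id of a group uses group == 0; sub ids reuse the same table id
+/// and region number with a non-zero group byte.
+fn group_of(region_id: RegionId) -> u8 {
+    ((region_id >> REGION_NUMBER_BITS) & ((1u64 << GROUP_BITS) - 1)) as u8
+}
+```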
+
+## Routing in meta server
+
+From previous sections, we can conclude the following points about routing:
+- Each "logical table" has its own, universally unique table id.
+- A logical table doesn't have its own physical regions; it shares the physical regions with other logical tables.
+- A logical table's route rule is a strict subset of its physical table's.
+
+To associate a logical table with physical regions, we need to specify the necessary information in the create table request, specifically the table type and its parent table. This requires changing our gRPC proto definition. Once the meta server recognizes that the table to create is a logical table, it will use the parent table's regions to create the route entry.
+
+To reduce the cost of region failover (which needs to update the physical table's route info), we'd better split the current route table structure into two parts:
+
+```rust
+region_route: Map<TableName, [RegionId]>,
+node_route: Map<RegionId, NodeId>,
+```
+
+By doing this, on each failover the meta server only needs to update the second `node_route` map and can leave the first one untouched.
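+
+A minimal sketch of why this split is cheap on failover, with in-memory maps standing in for the meta server's kv backend (names are illustrative):
+
+``` rust
+// Sketch only: on failover, only the region -> node mapping changes;
+// the table -> regions mapping stays untouched.
+use std::collections::HashMap;
+
+type RegionId = u64;
+type NodeId = u64;
+
+#[derive(Default)]
+struct RouteTables {
+    region_route: HashMap<String, Vec<RegionId>>,
+    node_route: HashMap<RegionId, NodeId>,
+}
+
+impl RouteTables {
+    fn on_failover(&mut self, region: RegionId, new_node: NodeId) {
+        self.node_route.insert(region, new_node);
+    }
+}
+```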
+
+## Query
+
+As with other existing components, a user query always starts in the frontend. In the planning phase, the frontend needs to fetch the related schemas of the queried table. This part stays the same, i.e., the changes in this RFC don't affect components above the `Table` abstraction.
+
+# Alternatives
+
+## Other routing method
+
+We could also apply this "special" route rule in the meta server, but there would be no essential difference from the proposed method.
+
+## Other storage for physical region's metadata
+
+Once we have implemented the "region family" that allows multiple physical schemas to exist in one region, we can store the metadata and the table data in one region.
+
+Before that, we can also let the `MetricRegion` hold a `KvBackend` to access the storage layer directly, but this breaks the abstraction in some way.
+
+# Drawbacks
+
+Since the physical storage is mixed together, it's hard to perform fine-grained operations at the table level, like configuring TTL, memtable size, or compaction strategy per table, or defining different partition rules for different tables. For scenarios like this, it's better to move the table out of the metric engine and "upgrade" it to a normal mito engine table. This requires a low-cost migration process, and we have to ensure data consistency during the migration, which may require an out-of-service period.
\ No newline at end of file
|
docs
|
rfc of metric engine (#1925)
|
a89840f5f9f1ef30ce67c64deb12dc9fa69c27a6
|
2024-01-05 13:42:23
|
zyy17
|
refactor(metrics): add 'greptime_' prefix for every metrics (#3093)
| false
|
diff --git a/src/catalog/src/metrics.rs b/src/catalog/src/metrics.rs
index 1b673d6210a2..8039ae13f8a4 100644
--- a/src/catalog/src/metrics.rs
+++ b/src/catalog/src/metrics.rs
@@ -19,17 +19,17 @@ use prometheus::*;
lazy_static! {
pub static ref METRIC_CATALOG_MANAGER_CATALOG_COUNT: IntGauge =
- register_int_gauge!("catalog_catalog_count", "catalog catalog count").unwrap();
+ register_int_gauge!("greptime_catalog_catalog_count", "catalog catalog count").unwrap();
pub static ref METRIC_CATALOG_MANAGER_SCHEMA_COUNT: IntGauge =
- register_int_gauge!("catalog_schema_count", "catalog schema count").unwrap();
+ register_int_gauge!("greptime_catalog_schema_count", "catalog schema count").unwrap();
pub static ref METRIC_CATALOG_MANAGER_TABLE_COUNT: IntGaugeVec = register_int_gauge_vec!(
- "catalog_table_count",
+ "greptime_catalog_table_count",
"catalog table count",
&[METRIC_DB_LABEL]
)
.unwrap();
pub static ref METRIC_CATALOG_KV_REMOTE_GET: Histogram =
- register_histogram!("catalog_kv_get_remote", "catalog kv get remote").unwrap();
+ register_histogram!("greptime_catalog_kv_get_remote", "catalog kv get remote").unwrap();
pub static ref METRIC_CATALOG_KV_GET: Histogram =
- register_histogram!("catalog_kv_get", "catalog kv get").unwrap();
+ register_histogram!("greptime_catalog_kv_get", "catalog kv get").unwrap();
}
diff --git a/src/client/src/metrics.rs b/src/client/src/metrics.rs
index efd3f7b44166..6f543f1e7fbc 100644
--- a/src/client/src/metrics.rs
+++ b/src/client/src/metrics.rs
@@ -17,27 +17,30 @@ use prometheus::*;
lazy_static! {
pub static ref METRIC_GRPC_CREATE_TABLE: Histogram =
- register_histogram!("grpc_create_table", "grpc create table").unwrap();
- pub static ref METRIC_GRPC_PROMQL_RANGE_QUERY: Histogram =
- register_histogram!("grpc_promql_range_query", "grpc promql range query").unwrap();
+ register_histogram!("greptime_grpc_create_table", "grpc create table").unwrap();
+ pub static ref METRIC_GRPC_PROMQL_RANGE_QUERY: Histogram = register_histogram!(
+ "greptime_grpc_promql_range_query",
+ "grpc promql range query"
+ )
+ .unwrap();
pub static ref METRIC_GRPC_INSERT: Histogram =
- register_histogram!("grpc_insert", "grpc insert").unwrap();
+ register_histogram!("greptime_grpc_insert", "grpc insert").unwrap();
pub static ref METRIC_GRPC_DELETE: Histogram =
- register_histogram!("grpc_delete", "grpc delete").unwrap();
+ register_histogram!("greptime_grpc_delete", "grpc delete").unwrap();
pub static ref METRIC_GRPC_SQL: Histogram =
- register_histogram!("grpc_sql", "grpc sql").unwrap();
+ register_histogram!("greptime_grpc_sql", "grpc sql").unwrap();
pub static ref METRIC_GRPC_LOGICAL_PLAN: Histogram =
- register_histogram!("grpc_logical_plan", "grpc logical plan").unwrap();
+ register_histogram!("greptime_grpc_logical_plan", "grpc logical plan").unwrap();
pub static ref METRIC_GRPC_ALTER: Histogram =
- register_histogram!("grpc_alter", "grpc alter").unwrap();
+ register_histogram!("greptime_grpc_alter", "grpc alter").unwrap();
pub static ref METRIC_GRPC_DROP_TABLE: Histogram =
- register_histogram!("grpc_drop_table", "grpc drop table").unwrap();
+ register_histogram!("greptime_grpc_drop_table", "grpc drop table").unwrap();
pub static ref METRIC_GRPC_TRUNCATE_TABLE: Histogram =
- register_histogram!("grpc_truncate_table", "grpc truncate table").unwrap();
+ register_histogram!("greptime_grpc_truncate_table", "grpc truncate table").unwrap();
pub static ref METRIC_GRPC_DO_GET: Histogram =
- register_histogram!("grpc_do_get", "grpc do get").unwrap();
+ register_histogram!("greptime_grpc_do_get", "grpc do get").unwrap();
pub static ref METRIC_REGION_REQUEST_GRPC: HistogramVec = register_histogram_vec!(
- "grpc_region_request",
+ "greptime_grpc_region_request",
"grpc region request",
&["request_type"]
)
diff --git a/src/cmd/src/lib.rs b/src/cmd/src/lib.rs
index 52226e0441b3..dfa2b3a119d5 100644
--- a/src/cmd/src/lib.rs
+++ b/src/cmd/src/lib.rs
@@ -28,7 +28,7 @@ pub mod standalone;
lazy_static::lazy_static! {
static ref APP_VERSION: prometheus::IntGaugeVec =
- prometheus::register_int_gauge_vec!("app_version", "app version", &["short_version", "version"]).unwrap();
+ prometheus::register_int_gauge_vec!("greptime_app_version", "app version", &["short_version", "version"]).unwrap();
}
#[async_trait]
diff --git a/src/common/meta/src/metrics.rs b/src/common/meta/src/metrics.rs
index 0e009608b220..f6979eb198a8 100644
--- a/src/common/meta/src/metrics.rs
+++ b/src/common/meta/src/metrics.rs
@@ -16,36 +16,43 @@ use lazy_static::lazy_static;
use prometheus::*;
lazy_static! {
- pub static ref METRIC_META_TXN_REQUEST: HistogramVec =
- register_histogram_vec!("meta_txn_request", "meta txn request", &["target", "op"]).unwrap();
+ pub static ref METRIC_META_TXN_REQUEST: HistogramVec = register_histogram_vec!(
+ "greptime_meta_txn_request",
+ "meta txn request",
+ &["target", "op"]
+ )
+ .unwrap();
pub static ref METRIC_META_CREATE_CATALOG: Histogram =
- register_histogram!("meta_create_catalog", "meta create catalog").unwrap();
- pub static ref METRIC_META_CREATE_CATALOG_COUNTER: IntCounter =
- register_int_counter!("meta_create_catalog_counter", "meta create catalog").unwrap();
+ register_histogram!("greptime_meta_create_catalog", "meta create catalog").unwrap();
+ pub static ref METRIC_META_CREATE_CATALOG_COUNTER: IntCounter = register_int_counter!(
+ "greptime_meta_create_catalog_counter",
+ "meta create catalog"
+ )
+ .unwrap();
pub static ref METRIC_META_CREATE_SCHEMA: Histogram =
- register_histogram!("meta_create_schema", "meta create schema").unwrap();
+ register_histogram!("greptime_meta_create_schema", "meta create schema").unwrap();
pub static ref METRIC_META_CREATE_SCHEMA_COUNTER: IntCounter =
- register_int_counter!("meta_create_schema_counter", "meta create schema").unwrap();
+ register_int_counter!("greptime_meta_create_schema_counter", "meta create schema").unwrap();
pub static ref METRIC_META_PROCEDURE_CREATE_TABLE: HistogramVec = register_histogram_vec!(
- "meta_procedure_create_table",
+ "greptime_meta_procedure_create_table",
"meta procedure create table",
&["step"]
)
.unwrap();
pub static ref METRIC_META_PROCEDURE_DROP_TABLE: HistogramVec = register_histogram_vec!(
- "meta_procedure_drop_table",
+ "greptime_meta_procedure_drop_table",
"meta procedure drop table",
&["step"]
)
.unwrap();
pub static ref METRIC_META_PROCEDURE_ALTER_TABLE: HistogramVec = register_histogram_vec!(
- "meta_procedure_alter_table",
+ "greptime_meta_procedure_alter_table",
"meta procedure alter table",
&["step"]
)
.unwrap();
pub static ref METRIC_META_PROCEDURE_TRUNCATE_TABLE: HistogramVec = register_histogram_vec!(
- "meta_procedure_truncate_table",
+ "greptime_meta_procedure_truncate_table",
"meta procedure truncate table",
&["step"]
)
diff --git a/src/common/runtime/src/metrics.rs b/src/common/runtime/src/metrics.rs
index 30c5c474e452..c332ccf6e64d 100644
--- a/src/common/runtime/src/metrics.rs
+++ b/src/common/runtime/src/metrics.rs
@@ -20,13 +20,13 @@ pub const THREAD_NAME_LABEL: &str = "thread_name";
lazy_static! {
pub static ref METRIC_RUNTIME_THREADS_ALIVE: IntGaugeVec = register_int_gauge_vec!(
- "runtime_threads_alive",
+ "greptime_runtime_threads_alive",
"runtime threads alive",
&[THREAD_NAME_LABEL]
)
.unwrap();
pub static ref METRIC_RUNTIME_THREADS_IDLE: IntGaugeVec = register_int_gauge_vec!(
- "runtime_threads_idle",
+ "greptime_runtime_threads_idle",
"runtime threads idle",
&[THREAD_NAME_LABEL]
)
diff --git a/src/common/telemetry/src/panic_hook.rs b/src/common/telemetry/src/panic_hook.rs
index e96be38e9feb..f4145af28238 100644
--- a/src/common/telemetry/src/panic_hook.rs
+++ b/src/common/telemetry/src/panic_hook.rs
@@ -22,7 +22,7 @@ use prometheus::*;
lazy_static! {
pub static ref PANIC_COUNTER: IntCounter =
- register_int_counter!("panic_counter", "panic_counter").unwrap();
+ register_int_counter!("greptime_panic_counter", "panic_counter").unwrap();
}
pub fn set_panic_hook() {
diff --git a/src/datanode/src/metrics.rs b/src/datanode/src/metrics.rs
index 0ee6764d19af..7eba41869586 100644
--- a/src/datanode/src/metrics.rs
+++ b/src/datanode/src/metrics.rs
@@ -24,26 +24,26 @@ pub const REGION_ID: &str = "region_id";
lazy_static! {
/// The elapsed time of handling a request in the region_server.
pub static ref HANDLE_REGION_REQUEST_ELAPSED: HistogramVec = register_histogram_vec!(
- "datanode_handle_region_request_elapsed",
+ "greptime_datanode_handle_region_request_elapsed",
"datanode handle region request elapsed",
&[REGION_REQUEST_TYPE]
)
.unwrap();
/// The elapsed time since the last received heartbeat.
pub static ref LAST_RECEIVED_HEARTBEAT_ELAPSED: IntGauge = register_int_gauge!(
- "last_received_heartbeat_lease_elapsed",
+ "greptime_last_received_heartbeat_lease_elapsed",
"last received heartbeat lease elapsed",
)
.unwrap();
pub static ref LEASE_EXPIRED_REGION: IntGaugeVec = register_int_gauge_vec!(
- "lease_expired_region",
+ "greptime_lease_expired_region",
"lease expired region",
&[REGION_ID]
)
.unwrap();
/// The received region leases via heartbeat.
pub static ref HEARTBEAT_REGION_LEASES: IntGaugeVec = register_int_gauge_vec!(
- "heartbeat_region_leases",
+ "greptime_heartbeat_region_leases",
"received region leases via heartbeat",
&[REGION_ROLE]
)
diff --git a/src/frontend/src/metrics.rs b/src/frontend/src/metrics.rs
index f57cdcd17976..8475aca54c00 100644
--- a/src/frontend/src/metrics.rs
+++ b/src/frontend/src/metrics.rs
@@ -17,34 +17,34 @@ use prometheus::*;
lazy_static! {
pub static ref METRIC_HANDLE_SQL_ELAPSED: Histogram =
- register_histogram!("frontend_handle_sql_elapsed", "frontend handle sql elapsed").unwrap();
+ register_histogram!("greptime_frontend_handle_sql_elapsed", "frontend handle sql elapsed").unwrap();
pub static ref METRIC_HANDLE_PROMQL_ELAPSED: Histogram = register_histogram!(
- "frontend_handle_promql_elapsed",
+ "greptime_frontend_handle_promql_elapsed",
"frontend handle promql elapsed"
)
.unwrap();
pub static ref METRIC_EXEC_PLAN_ELAPSED: Histogram =
- register_histogram!("frontend_exec_plan_elapsed", "frontend exec plan elapsed").unwrap();
+ register_histogram!("greptime_frontend_exec_plan_elapsed", "frontend exec plan elapsed").unwrap();
pub static ref METRIC_HANDLE_SCRIPTS_ELAPSED: Histogram = register_histogram!(
- "frontend_handle_scripts_elapsed",
+ "greptime_frontend_handle_scripts_elapsed",
"frontend handle scripts elapsed"
)
.unwrap();
pub static ref METRIC_RUN_SCRIPT_ELAPSED: Histogram =
- register_histogram!("frontend_run_script_elapsed", "frontend run script elapsed").unwrap();
+ register_histogram!("greptime_frontend_run_script_elapsed", "frontend run script elapsed").unwrap();
/// The samples count of Prometheus remote write.
pub static ref PROM_STORE_REMOTE_WRITE_SAMPLES: IntCounter = register_int_counter!(
- "frontend_prometheus_remote_write_samples",
+ "greptime_frontend_prometheus_remote_write_samples",
"frontend prometheus remote write samples"
)
.unwrap();
pub static ref OTLP_METRICS_ROWS: IntCounter = register_int_counter!(
- "frontend_otlp_metrics_rows",
+ "greptime_frontend_otlp_metrics_rows",
"frontend otlp metrics rows"
)
.unwrap();
pub static ref OTLP_TRACES_ROWS: IntCounter = register_int_counter!(
- "frontend_otlp_traces_rows",
+ "greptime_frontend_otlp_traces_rows",
"frontend otlp traces rows"
)
.unwrap();
diff --git a/src/meta-srv/src/metrics.rs b/src/meta-srv/src/metrics.rs
index 1dc63c624b8b..ca35b290c279 100644
--- a/src/meta-srv/src/metrics.rs
+++ b/src/meta-srv/src/metrics.rs
@@ -18,31 +18,31 @@ use prometheus::*;
lazy_static! {
/// Elapsed time to responding kv requests.
pub static ref METRIC_META_KV_REQUEST_ELAPSED: HistogramVec = register_histogram_vec!(
- "meta_kv_request_elapsed",
+ "greptime_meta_kv_request_elapsed",
"meta kv request",
&["target", "op", "cluster_id"]
)
.unwrap();
/// The heartbeat connection gauge.
pub static ref METRIC_META_HEARTBEAT_CONNECTION_NUM: IntGauge = register_int_gauge!(
- "meta_heartbeat_connection_num",
+ "greptime_meta_heartbeat_connection_num",
"meta heartbeat connection num"
)
.unwrap();
/// Elapsed time to execution of heartbeat handlers.
pub static ref METRIC_META_HANDLER_EXECUTE: HistogramVec =
- register_histogram_vec!("meta_handler_execute", "meta handler execute", &["name"]).unwrap();
+ register_histogram_vec!("greptime_meta_handler_execute", "meta handler execute", &["name"]).unwrap();
/// Inactive region gauge.
pub static ref METRIC_META_INACTIVE_REGIONS: IntGauge =
- register_int_gauge!("meta_inactive_regions", "meta inactive regions").unwrap();
+ register_int_gauge!("greptime_meta_inactive_regions", "meta inactive regions").unwrap();
/// Elapsed time to leader cache kv.
pub static ref METRIC_META_LEADER_CACHED_KV_LOAD_ELAPSED: HistogramVec =
- register_histogram_vec!("meta_leader_cache_kv_load", "meta load cache", &["prefix"])
+ register_histogram_vec!("greptime_meta_leader_cache_kv_load", "meta load cache", &["prefix"])
.unwrap();
/// Meta kv cache hit counter.
pub static ref METRIC_META_KV_CACHE_HIT: IntCounterVec =
- register_int_counter_vec!("meta_kv_cache_hit", "meta kv cache hit", &["op"]).unwrap();
+ register_int_counter_vec!("greptime_meta_kv_cache_hit", "meta kv cache hit", &["op"]).unwrap();
/// Meta kv cache miss counter.
pub static ref METRIC_META_KV_CACHE_MISS: IntCounterVec =
- register_int_counter_vec!("meta_kv_cache_miss", "meta kv cache miss", &["op"]).unwrap();
+ register_int_counter_vec!("greptime_meta_kv_cache_miss", "meta kv cache miss", &["op"]).unwrap();
}
diff --git a/src/metric-engine/src/metrics.rs b/src/metric-engine/src/metrics.rs
index d026e57d0ac7..2804c72dbb81 100644
--- a/src/metric-engine/src/metrics.rs
+++ b/src/metric-engine/src/metrics.rs
@@ -20,21 +20,21 @@ use prometheus::*;
lazy_static! {
/// Gauge for opened regions
pub static ref PHYSICAL_REGION_COUNT: IntGauge =
- register_int_gauge!("metric_engine_physical_region_count", "metric engine physical region count").unwrap();
+ register_int_gauge!("greptime_metric_engine_physical_region_count", "metric engine physical region count").unwrap();
/// Gauge of columns across all opened regions
pub static ref PHYSICAL_COLUMN_COUNT: IntGauge =
- register_int_gauge!("metric_engine_physical_column_count", "metric engine physical column count").unwrap();
+ register_int_gauge!("greptime_metric_engine_physical_column_count", "metric engine physical column count").unwrap();
/// Gauge for opened logical regions
pub static ref LOGICAL_REGION_COUNT: IntGauge =
- register_int_gauge!("metric_engine_logical_region_count", "metric engine logical region count").unwrap();
+ register_int_gauge!("greptime_metric_engine_logical_region_count", "metric engine logical region count").unwrap();
/// Histogram for opened logical regions
pub static ref MITO_DDL_DURATION: Histogram =
- register_histogram!("metric_engine_mito_ddl", "metric engine mito ddl").unwrap();
+ register_histogram!("greptime_metric_engine_mito_ddl", "metric engine mito ddl").unwrap();
/// Counter for forbidden operations
pub static ref FORBIDDEN_OPERATION_COUNT: IntCounter =
- register_int_counter!("metric_engine_forbidden_request", "metric forbidden request").unwrap();
+ register_int_counter!("greptime_metric_engine_forbidden_request", "metric forbidden request").unwrap();
}
diff --git a/src/mito2/src/metrics.rs b/src/mito2/src/metrics.rs
index aa407bfdbced..fa7b199e571c 100644
--- a/src/mito2/src/metrics.rs
+++ b/src/mito2/src/metrics.rs
@@ -27,13 +27,13 @@ pub const FILE_TYPE_LABEL: &str = "file_type";
lazy_static! {
/// Global write buffer size in bytes.
pub static ref WRITE_BUFFER_BYTES: IntGauge =
- register_int_gauge!("mito_write_buffer_bytes", "mito write buffer bytes").unwrap();
+ register_int_gauge!("greptime_mito_write_buffer_bytes", "mito write buffer bytes").unwrap();
/// Gauge for open regions
pub static ref REGION_COUNT: IntGauge =
- register_int_gauge!("mito_region_count", "mito region count").unwrap();
+ register_int_gauge!("greptime_mito_region_count", "mito region count").unwrap();
/// Elapsed time to handle requests.
pub static ref HANDLE_REQUEST_ELAPSED: HistogramVec = register_histogram_vec!(
- "mito_handle_request_elapsed",
+ "greptime_mito_handle_request_elapsed",
"mito handle request elapsed",
&[TYPE_LABEL]
)
@@ -43,44 +43,44 @@ lazy_static! {
/// Counter of scheduled flush requests.
/// Note that the flush scheduler may merge some flush requests.
pub static ref FLUSH_REQUESTS_TOTAL: IntCounterVec = register_int_counter_vec!(
- "mito_flush_requests_total",
+ "greptime_mito_flush_requests_total",
"mito flush requests total",
&[FLUSH_REASON]
)
.unwrap();
/// Counter of scheduled failed flush jobs.
pub static ref FLUSH_ERRORS_TOTAL: IntCounter =
- register_int_counter!("mito_flush_errors_total", "mito flush errors total").unwrap();
+ register_int_counter!("greptime_mito_flush_errors_total", "mito flush errors total").unwrap();
/// Elapsed time of a flush job.
pub static ref FLUSH_ELAPSED: HistogramVec = register_histogram_vec!(
- "mito_flush_elapsed",
+ "greptime_mito_flush_elapsed",
"mito flush elapsed",
&[TYPE_LABEL]
)
.unwrap();
/// Histogram of flushed bytes.
pub static ref FLUSH_BYTES_TOTAL: IntCounter =
- register_int_counter!("mito_flush_bytes_total", "mito flush bytes total").unwrap();
+ register_int_counter!("greptime_mito_flush_bytes_total", "mito flush bytes total").unwrap();
// ------ End of flush related metrics
// ------ Write related metrics
/// Counter of stalled write requests.
pub static ref WRITE_STALL_TOTAL: IntCounter =
- register_int_counter!("mito_write_stall_total", "mito write stall total").unwrap();
+ register_int_counter!("greptime_mito_write_stall_total", "mito write stall total").unwrap();
/// Counter of rejected write requests.
pub static ref WRITE_REJECT_TOTAL: IntCounter =
- register_int_counter!("mito_write_reject_total", "mito write reject total").unwrap();
+ register_int_counter!("greptime_mito_write_reject_total", "mito write reject total").unwrap();
/// Elapsed time of each write stage.
pub static ref WRITE_STAGE_ELAPSED: HistogramVec = register_histogram_vec!(
- "mito_write_stage_elapsed",
+ "greptime_mito_write_stage_elapsed",
"mito write stage elapsed",
&[STAGE_LABEL]
)
.unwrap();
/// Counter of rows to write.
pub static ref WRITE_ROWS_TOTAL: IntCounterVec = register_int_counter_vec!(
- "mito_write_rows_total",
+ "greptime_mito_write_rows_total",
"mito write rows total",
&[TYPE_LABEL]
)
@@ -91,56 +91,56 @@ lazy_static! {
// Compaction metrics
/// Timer of different stages in compaction.
pub static ref COMPACTION_STAGE_ELAPSED: HistogramVec = register_histogram_vec!(
- "mito_compaction_stage_elapsed",
+ "greptime_mito_compaction_stage_elapsed",
"mito compaction stage elapsed",
&[STAGE_LABEL]
)
.unwrap();
/// Timer of whole compaction task.
pub static ref COMPACTION_ELAPSED_TOTAL: Histogram =
- register_histogram!("mito_compaction_total_elapsed", "mito compaction total elapsed").unwrap();
+ register_histogram!("greptime_mito_compaction_total_elapsed", "mito compaction total elapsed").unwrap();
/// Counter of all requested compaction task.
pub static ref COMPACTION_REQUEST_COUNT: IntCounter =
- register_int_counter!("mito_compaction_requests_total", "mito compaction requests total").unwrap();
+ register_int_counter!("greptime_mito_compaction_requests_total", "mito compaction requests total").unwrap();
/// Counter of failed compaction task.
pub static ref COMPACTION_FAILURE_COUNT: IntCounter =
- register_int_counter!("mito_compaction_failure_total", "mito compaction failure total").unwrap();
+ register_int_counter!("greptime_mito_compaction_failure_total", "mito compaction failure total").unwrap();
// ------- End of compaction metrics.
// Query metrics.
/// Timer of different stages in query.
pub static ref READ_STAGE_ELAPSED: HistogramVec = register_histogram_vec!(
- "mito_read_stage_elapsed",
+ "greptime_mito_read_stage_elapsed",
"mito read stage elapsed",
&[STAGE_LABEL]
)
.unwrap();
/// Counter of rows read.
pub static ref READ_ROWS_TOTAL: IntCounterVec =
- register_int_counter_vec!("mito_read_rows_total", "mito read rows total", &[TYPE_LABEL]).unwrap();
+ register_int_counter_vec!("greptime_mito_read_rows_total", "mito read rows total", &[TYPE_LABEL]).unwrap();
/// Counter of filtered rows during merge.
pub static ref MERGE_FILTER_ROWS_TOTAL: IntCounterVec =
- register_int_counter_vec!("mito_merge_filter_rows_total", "mito merge filter rows total", &[TYPE_LABEL]).unwrap();
+ register_int_counter_vec!("greptime_mito_merge_filter_rows_total", "mito merge filter rows total", &[TYPE_LABEL]).unwrap();
// ------- End of query metrics.
// Cache related metrics.
/// Cache hit counter.
pub static ref CACHE_HIT: IntCounterVec = register_int_counter_vec!(
- "mito_cache_hit",
+ "greptime_mito_cache_hit",
"mito cache hit",
&[TYPE_LABEL]
)
.unwrap();
/// Cache miss counter.
pub static ref CACHE_MISS: IntCounterVec = register_int_counter_vec!(
- "mito_cache_miss",
+ "greptime_mito_cache_miss",
"mito cache miss",
&[TYPE_LABEL]
)
.unwrap();
/// Cache size in bytes.
pub static ref CACHE_BYTES: IntGaugeVec = register_int_gauge_vec!(
- "mito_cache_bytes",
+ "greptime_mito_cache_bytes",
"mito cache bytes",
&[TYPE_LABEL]
)
@@ -156,13 +156,13 @@ lazy_static! {
.unwrap();
/// Gauge of index apply memory usage.
pub static ref INDEX_APPLY_MEMORY_USAGE: IntGauge = register_int_gauge!(
- "index_apply_memory_usage",
+ "greptime_index_apply_memory_usage",
"index apply memory usage",
)
.unwrap();
/// Counter of r/w bytes on index related IO operations.
pub static ref INDEX_IO_BYTES_TOTAL: IntCounterVec = register_int_counter_vec!(
- "index_io_bytes_total",
+ "greptime_index_io_bytes_total",
"index io bytes total",
&[TYPE_LABEL, FILE_TYPE_LABEL]
)
@@ -173,7 +173,7 @@ lazy_static! {
/// Counter of r/w operations on index related IO operations, e.g. read, write, seek and flush.
pub static ref INDEX_IO_OP_TOTAL: IntCounterVec = register_int_counter_vec!(
- "index_io_op_total",
+ "greptime_index_io_op_total",
"index io op total",
&[TYPE_LABEL, FILE_TYPE_LABEL]
)
diff --git a/src/object-store/src/metrics.rs b/src/object-store/src/metrics.rs
index 96016d766068..9ab3b7df1c8a 100644
--- a/src/object-store/src/metrics.rs
+++ b/src/object-store/src/metrics.rs
@@ -21,18 +21,18 @@ use prometheus::*;
lazy_static! {
/// Cache hit counter, no matter what the cache result is.
pub static ref OBJECT_STORE_LRU_CACHE_HIT: IntCounterVec = register_int_counter_vec!(
- "object_store_lru_cache_hit",
+ "greptime_object_store_lru_cache_hit",
"object store lru cache hit",
&["result"]
)
.unwrap();
/// Cache miss counter
pub static ref OBJECT_STORE_LRU_CACHE_MISS: IntCounter =
- register_int_counter!("object_store_lru_cache_miss", "object store lru cache miss")
+ register_int_counter!("greptime_object_store_lru_cache_miss", "object store lru cache miss")
.unwrap();
/// Object store read error counter
pub static ref OBJECT_STORE_READ_ERROR: IntCounterVec = register_int_counter_vec!(
- "object_store_read_errors",
+ "greptime_object_store_read_errors",
"object store read errors",
&["kind"]
)
@@ -40,11 +40,11 @@ lazy_static! {
/// Cache entry number
pub static ref OBJECT_STORE_LRU_CACHE_ENTRIES: IntGauge =
- register_int_gauge!("object_store_lru_cache_entries", "object store lru cache entries")
+ register_int_gauge!("greptime_object_store_lru_cache_entries", "object store lru cache entries")
.unwrap();
/// Cache size in bytes
pub static ref OBJECT_STORE_LRU_CACHE_BYTES: IntGauge =
- register_int_gauge!("object_store_lru_cache_bytes", "object store lru cache bytes")
+ register_int_gauge!("greptime_object_store_lru_cache_bytes", "object store lru cache bytes")
.unwrap();
}
diff --git a/src/operator/src/metrics.rs b/src/operator/src/metrics.rs
index 577d843cc069..52d0d39d4d50 100644
--- a/src/operator/src/metrics.rs
+++ b/src/operator/src/metrics.rs
@@ -16,10 +16,19 @@ use lazy_static::lazy_static;
use prometheus::*;
lazy_static! {
- pub static ref DIST_CREATE_TABLE: Histogram =
- register_histogram!("table_operator_create_table", "table operator create table").unwrap();
- pub static ref DIST_INGEST_ROW_COUNT: IntCounter =
- register_int_counter!("table_operator_ingest_rows", "table operator ingest rows").unwrap();
- pub static ref DIST_DELETE_ROW_COUNT: IntCounter =
- register_int_counter!("table_operator_delete_rows", "table operator delete rows").unwrap();
+ pub static ref DIST_CREATE_TABLE: Histogram = register_histogram!(
+ "greptime_table_operator_create_table",
+ "table operator create table"
+ )
+ .unwrap();
+ pub static ref DIST_INGEST_ROW_COUNT: IntCounter = register_int_counter!(
+ "greptime_table_operator_ingest_rows",
+ "table operator ingest rows"
+ )
+ .unwrap();
+ pub static ref DIST_DELETE_ROW_COUNT: IntCounter = register_int_counter!(
+ "greptime_table_operator_delete_rows",
+ "table operator delete rows"
+ )
+ .unwrap();
}
diff --git a/src/promql/src/metrics.rs b/src/promql/src/metrics.rs
index 5d3c49ac37c3..84070d4141b7 100644
--- a/src/promql/src/metrics.rs
+++ b/src/promql/src/metrics.rs
@@ -18,5 +18,5 @@ use prometheus::*;
lazy_static! {
/// Counter for the number of series processed per query.
pub static ref PROMQL_SERIES_COUNT: Histogram =
- register_histogram!("promql_series_count", "promql series count").unwrap();
+ register_histogram!("greptime_promql_series_count", "promql series count").unwrap();
}
diff --git a/src/query/src/metrics.rs b/src/query/src/metrics.rs
index f4a23b25a91f..1b7ad6bca20a 100644
--- a/src/query/src/metrics.rs
+++ b/src/query/src/metrics.rs
@@ -16,36 +16,48 @@ use lazy_static::lazy_static;
use prometheus::*;
lazy_static! {
- pub static ref METRIC_PARSE_SQL_ELAPSED: Histogram =
- register_histogram!("query_parse_sql_elapsed", "query parse sql elapsed").unwrap();
- pub static ref METRIC_PARSE_PROMQL_ELAPSED: Histogram =
- register_histogram!("query_parse_promql_elapsed", "query parse promql elapsed").unwrap();
+ pub static ref METRIC_PARSE_SQL_ELAPSED: Histogram = register_histogram!(
+ "greptime_query_parse_sql_elapsed",
+ "query parse sql elapsed"
+ )
+ .unwrap();
+ pub static ref METRIC_PARSE_PROMQL_ELAPSED: Histogram = register_histogram!(
+ "greptime_query_parse_promql_elapsed",
+ "query parse promql elapsed"
+ )
+ .unwrap();
pub static ref METRIC_OPTIMIZE_LOGICAL_ELAPSED: Histogram = register_histogram!(
- "query_optimize_logicalplan_elapsed",
+ "greptime_query_optimize_logicalplan_elapsed",
"query optimize logicalplan elapsed"
)
.unwrap();
pub static ref METRIC_OPTIMIZE_PHYSICAL_ELAPSED: Histogram = register_histogram!(
- "query_optimize_physicalplan_elapsed",
+ "greptime_query_optimize_physicalplan_elapsed",
"query optimize physicalplan elapsed"
)
.unwrap();
pub static ref METRIC_CREATE_PHYSICAL_ELAPSED: Histogram = register_histogram!(
- "query_create_physicalplan_elapsed",
+ "greptime_query_create_physicalplan_elapsed",
"query create physicalplan elapsed"
)
.unwrap();
- pub static ref METRIC_EXEC_PLAN_ELAPSED: Histogram =
- register_histogram!("query_execute_plan_elapsed", "query execute plan elapsed").unwrap();
+ pub static ref METRIC_EXEC_PLAN_ELAPSED: Histogram = register_histogram!(
+ "greptime_query_execute_plan_elapsed",
+ "query execute plan elapsed"
+ )
+ .unwrap();
pub static ref METRIC_MERGE_SCAN_POLL_ELAPSED: Histogram = register_histogram!(
- "query_merge_scan_poll_elapsed",
+ "greptime_query_merge_scan_poll_elapsed",
"query merge scan poll elapsed"
)
.unwrap();
- pub static ref METRIC_MERGE_SCAN_REGIONS: Histogram =
- register_histogram!("query_merge_scan_regions", "query merge scan regions").unwrap();
+ pub static ref METRIC_MERGE_SCAN_REGIONS: Histogram = register_histogram!(
+ "greptime_query_merge_scan_regions",
+ "query merge scan regions"
+ )
+ .unwrap();
pub static ref METRIC_MERGE_SCAN_ERRORS_TOTAL: IntCounter = register_int_counter!(
- "query_merge_scan_errors_total",
+ "greptime_query_merge_scan_errors_total",
"query merge scan errors total"
)
.unwrap();
diff --git a/src/script/src/python/metric.rs b/src/script/src/python/metric.rs
index c5d2c927eb2c..9bf43c321011 100644
--- a/src/script/src/python/metric.rs
+++ b/src/script/src/python/metric.rs
@@ -16,12 +16,18 @@ use lazy_static::lazy_static;
use prometheus::*;
lazy_static! {
- pub static ref METRIC_RSPY_INIT_ELAPSED: Histogram =
- register_histogram!("script_rspy_init_elapsed", "script rspy init elapsed").unwrap();
- pub static ref METRIC_RSPY_EXEC_ELAPSED: Histogram =
- register_histogram!("script_rspy_exec_elapsed", "script rspy exec elapsed").unwrap();
+ pub static ref METRIC_RSPY_INIT_ELAPSED: Histogram = register_histogram!(
+ "greptime_script_rspy_init_elapsed",
+ "script rspy init elapsed"
+ )
+ .unwrap();
+ pub static ref METRIC_RSPY_EXEC_ELAPSED: Histogram = register_histogram!(
+ "greptime_script_rspy_exec_elapsed",
+ "script rspy exec elapsed"
+ )
+ .unwrap();
pub static ref METRIC_RSPY_EXEC_TOTAL_ELAPSED: Histogram = register_histogram!(
- "script_rspy_exec_total_elapsed",
+ "greptime_script_rspy_exec_total_elapsed",
"script rspy exec total elapsed"
)
.unwrap();
@@ -29,12 +35,18 @@ lazy_static! {
#[cfg(feature = "pyo3_backend")]
lazy_static! {
- pub static ref METRIC_PYO3_EXEC_ELAPSED: Histogram =
- register_histogram!("script_pyo3_exec_elapsed", "script pyo3 exec elapsed").unwrap();
- pub static ref METRIC_PYO3_INIT_ELAPSED: Histogram =
- register_histogram!("script_pyo3_init_elapsed", "script pyo3 init elapsed").unwrap();
+ pub static ref METRIC_PYO3_EXEC_ELAPSED: Histogram = register_histogram!(
+ "greptime_script_pyo3_exec_elapsed",
+ "script pyo3 exec elapsed"
+ )
+ .unwrap();
+ pub static ref METRIC_PYO3_INIT_ELAPSED: Histogram = register_histogram!(
+ "greptime_script_pyo3_init_elapsed",
+ "script pyo3 init elapsed"
+ )
+ .unwrap();
pub static ref METRIC_PYO3_EXEC_TOTAL_ELAPSED: Histogram = register_histogram!(
- "script_pyo3_exec_total_elapsed",
+ "greptime_script_pyo3_exec_total_elapsed",
"script pyo3 exec total elapsed"
)
.unwrap();
diff --git a/src/servers/src/metrics.rs b/src/servers/src/metrics.rs
index 0c630ad3ac0a..87d8bd7a9fe4 100644
--- a/src/servers/src/metrics.rs
+++ b/src/servers/src/metrics.rs
@@ -42,165 +42,172 @@ pub(crate) const METRIC_METHOD_LABEL: &str = "method";
pub(crate) const METRIC_PATH_LABEL: &str = "path";
lazy_static! {
- pub static ref METRIC_ERROR_COUNTER: IntCounterVec =
- register_int_counter_vec!("servers_error", "servers error", &[METRIC_PROTOCOL_LABEL])
- .unwrap();
+ pub static ref METRIC_ERROR_COUNTER: IntCounterVec = register_int_counter_vec!(
+ "greptime_servers_error",
+ "servers error",
+ &[METRIC_PROTOCOL_LABEL]
+ )
+ .unwrap();
pub static ref METRIC_HTTP_SQL_ELAPSED: HistogramVec = register_histogram_vec!(
- "servers_http_sql_elapsed",
+ "greptime_servers_http_sql_elapsed",
"servers http sql elapsed",
&[METRIC_DB_LABEL]
)
.unwrap();
pub static ref METRIC_HTTP_PROMQL_ELAPSED: HistogramVec = register_histogram_vec!(
- "servers_http_promql_elapsed",
+ "greptime_servers_http_promql_elapsed",
"servers http promql elapsed",
&[METRIC_DB_LABEL]
)
.unwrap();
pub static ref METRIC_AUTH_FAILURE: IntCounterVec = register_int_counter_vec!(
- "servers_auth_failure_count",
+ "greptime_servers_auth_failure_count",
"servers auth failure count",
&[METRIC_CODE_LABEL]
)
.unwrap();
pub static ref METRIC_HTTP_INFLUXDB_WRITE_ELAPSED: HistogramVec = register_histogram_vec!(
- "servers_http_influxdb_write_elapsed",
+ "greptime_servers_http_influxdb_write_elapsed",
"servers http influxdb write elapsed",
&[METRIC_DB_LABEL]
)
.unwrap();
pub static ref METRIC_HTTP_PROM_STORE_WRITE_ELAPSED: HistogramVec = register_histogram_vec!(
- "servers_http_prometheus_write_elapsed",
+ "greptime_servers_http_prometheus_write_elapsed",
"servers http prometheus write elapsed",
&[METRIC_DB_LABEL]
)
.unwrap();
pub static ref METRIC_HTTP_PROM_STORE_READ_ELAPSED: HistogramVec = register_histogram_vec!(
- "servers_http_prometheus_read_elapsed",
+ "greptime_servers_http_prometheus_read_elapsed",
"servers http prometheus read elapsed",
&[METRIC_DB_LABEL]
)
.unwrap();
pub static ref METRIC_HTTP_OPENTELEMETRY_METRICS_ELAPSED: HistogramVec =
register_histogram_vec!(
- "servers_http_otlp_metrics_elapsed",
+ "greptime_servers_http_otlp_metrics_elapsed",
"servers_http_otlp_metrics_elapsed",
&[METRIC_DB_LABEL]
)
.unwrap();
pub static ref METRIC_HTTP_OPENTELEMETRY_TRACES_ELAPSED: HistogramVec =
register_histogram_vec!(
- "servers_http_otlp_traces_elapsed",
+ "greptime_servers_http_otlp_traces_elapsed",
"servers http otlp traces elapsed",
&[METRIC_DB_LABEL]
)
.unwrap();
pub static ref METRIC_TCP_OPENTSDB_LINE_WRITE_ELAPSED: Histogram = register_histogram!(
- "servers_opentsdb_line_write_elapsed",
+ "greptime_servers_opentsdb_line_write_elapsed",
"servers opentsdb line write elapsed"
)
.unwrap();
pub static ref METRIC_HTTP_PROMQL_FORMAT_QUERY_ELAPSED: Histogram = register_histogram!(
- "servers_http_promql_format_query_elapsed",
+ "greptime_servers_http_promql_format_query_elapsed",
"servers http promql format query elapsed"
)
.unwrap();
pub static ref METRIC_HTTP_PROMQL_INSTANT_QUERY_ELAPSED: Histogram = register_histogram!(
- "servers_http_promql_instant_query_elapsed",
+ "greptime_servers_http_promql_instant_query_elapsed",
"servers http promql instant query elapsed"
)
.unwrap();
pub static ref METRIC_HTTP_PROMQL_RANGE_QUERY_ELAPSED: Histogram = register_histogram!(
- "servers_http_promql_range_query_elapsed",
+ "greptime_servers_http_promql_range_query_elapsed",
"servers http promql range query elapsed"
)
.unwrap();
pub static ref METRIC_HTTP_PROMQL_LABEL_QUERY_ELAPSED: Histogram = register_histogram!(
- "servers_http_promql_label_query_elapsed",
+ "greptime_servers_http_promql_label_query_elapsed",
"servers http promql label query elapsed"
)
.unwrap();
pub static ref METRIC_HTTP_PROMQL_SERIES_QUERY_ELAPSED: Histogram = register_histogram!(
- "servers_http_promql_series_query_elapsed",
+ "greptime_servers_http_promql_series_query_elapsed",
"servers http promql series query elapsed"
)
.unwrap();
pub static ref METRIC_HTTP_PROMQL_LABEL_VALUE_QUERY_ELAPSED: Histogram = register_histogram!(
- "servers_http_promql_label_value_query_elapsed",
+ "greptime_servers_http_promql_label_value_query_elapsed",
"servers http promql label value query elapsed"
)
.unwrap();
pub static ref METRIC_MYSQL_CONNECTIONS: IntGauge = register_int_gauge!(
- "servers_mysql_connection_count",
+ "greptime_servers_mysql_connection_count",
"servers mysql connection count"
)
.unwrap();
pub static ref METRIC_MYSQL_QUERY_TIMER: HistogramVec = register_histogram_vec!(
- "servers_mysql_query_elapsed",
+ "greptime_servers_mysql_query_elapsed",
"servers mysql query elapsed",
&[METRIC_MYSQL_SUBPROTOCOL_LABEL, METRIC_DB_LABEL]
)
.unwrap();
pub static ref METRIC_MYSQL_PREPARED_COUNT: IntCounterVec = register_int_counter_vec!(
- "servers_mysql_prepared_count",
+ "greptime_servers_mysql_prepared_count",
"servers mysql prepared count",
&[METRIC_DB_LABEL]
)
.unwrap();
pub static ref METRIC_POSTGRES_CONNECTIONS: IntGauge = register_int_gauge!(
- "servers_postgres_connection_count",
+ "greptime_servers_postgres_connection_count",
"servers postgres connection count"
)
.unwrap();
pub static ref METRIC_POSTGRES_QUERY_TIMER: HistogramVec = register_histogram_vec!(
- "servers_postgres_query_elapsed",
+ "greptime_servers_postgres_query_elapsed",
"servers postgres query elapsed",
&[METRIC_POSTGRES_SUBPROTOCOL_LABEL, METRIC_DB_LABEL]
)
.unwrap();
pub static ref METRIC_POSTGRES_PREPARED_COUNT: IntCounter = register_int_counter!(
- "servers_postgres_prepared_count",
+ "greptime_servers_postgres_prepared_count",
"servers postgres prepared count"
)
.unwrap();
pub static ref METRIC_SERVER_GRPC_DB_REQUEST_TIMER: HistogramVec = register_histogram_vec!(
- "servers_grpc_db_request_elapsed",
+ "greptime_servers_grpc_db_request_elapsed",
"servers grpc db request elapsed",
&[METRIC_DB_LABEL, METRIC_TYPE_LABEL, METRIC_CODE_LABEL]
)
.unwrap();
pub static ref METRIC_SERVER_GRPC_PROM_REQUEST_TIMER: HistogramVec = register_histogram_vec!(
- "servers_grpc_prom_request_elapsed",
+ "greptime_servers_grpc_prom_request_elapsed",
"servers grpc prom request elapsed",
&[METRIC_DB_LABEL]
)
.unwrap();
pub static ref METRIC_HTTP_REQUESTS_TOTAL: IntCounterVec = register_int_counter_vec!(
- "servers_http_requests_total",
+ "greptime_servers_http_requests_total",
"servers http requests total",
&[METRIC_METHOD_LABEL, METRIC_PATH_LABEL, METRIC_CODE_LABEL]
)
.unwrap();
pub static ref METRIC_HTTP_REQUESTS_ELAPSED: HistogramVec = register_histogram_vec!(
- "servers_http_requests_elapsed",
+ "greptime_servers_http_requests_elapsed",
"servers http requests elapsed",
&[METRIC_METHOD_LABEL, METRIC_PATH_LABEL, METRIC_CODE_LABEL]
)
.unwrap();
pub static ref METRIC_GRPC_REQUESTS_TOTAL: IntCounterVec = register_int_counter_vec!(
- "servers_grpc_requests_total",
+ "greptime_servers_grpc_requests_total",
"servers grpc requests total",
&[METRIC_PATH_LABEL, METRIC_CODE_LABEL]
)
.unwrap();
pub static ref METRIC_GRPC_REQUESTS_ELAPSED: HistogramVec = register_histogram_vec!(
- "servers_grpc_requests_elapsed",
+ "greptime_servers_grpc_requests_elapsed",
"servers grpc requests elapsed",
&[METRIC_PATH_LABEL, METRIC_CODE_LABEL]
)
.unwrap();
- pub static ref HTTP_TRACK_METRICS: HistogramVec =
- register_histogram_vec!("http_track_metrics", "http track metrics", &["tag"]).unwrap();
+ pub static ref HTTP_TRACK_METRICS: HistogramVec = register_histogram_vec!(
+ "greptime_http_track_metrics",
+ "http track metrics",
+ &["tag"]
+ )
+ .unwrap();
}
// Based on https://github.com/hyperium/tonic/blob/master/examples/src/tower/server.rs
|
refactor
|
add 'greptime_' prefix for every metrics (#3093)
|
facdda4d9fbcc0722049f7dfe8c87594903aaaa0
|
2023-03-16 09:06:38
|
Weny Xu
|
feat: implement CONNECTION clause of Copy To (#1163)
| false
|
diff --git a/.env.example b/.env.example
index da1bbcc2136d..2f842b2f7659 100644
--- a/.env.example
+++ b/.env.example
@@ -2,7 +2,7 @@
GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key
-
+GT_S3_ENDPOINT_URL=S3 endpoint url
# Settings for oss test
GT_OSS_BUCKET=OSS bucket
GT_OSS_ACCESS_KEY_ID=OSS access key id
diff --git a/Cargo.lock b/Cargo.lock
index 8445d67bd143..8b0a6e2f9689 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2327,6 +2327,7 @@ dependencies = [
"tower",
"tower-http",
"url",
+ "uuid",
]
[[package]]
diff --git a/src/datanode/Cargo.toml b/src/datanode/Cargo.toml
index eae2a9cd696c..7745b8bcfd74 100644
--- a/src/datanode/Cargo.toml
+++ b/src/datanode/Cargo.toml
@@ -64,6 +64,7 @@ tonic.workspace = true
tower = { version = "0.4", features = ["full"] }
tower-http = { version = "0.3", features = ["full"] }
url = "2.3.1"
+uuid.workspace = true
[dev-dependencies]
axum-test-helper = { git = "https://github.com/sunng87/axum-test-helper.git", branch = "patch-1" }
diff --git a/src/datanode/src/instance/sql.rs b/src/datanode/src/instance/sql.rs
index becbdbdea335..b363d2c8cd0a 100644
--- a/src/datanode/src/instance/sql.rs
+++ b/src/datanode/src/instance/sql.rs
@@ -163,14 +163,14 @@ impl Instance {
QueryStatement::Sql(Statement::Copy(copy_table)) => match copy_table {
CopyTable::To(copy_table) => {
let (catalog_name, schema_name, table_name) =
- table_idents_to_full_name(copy_table.table_name(), query_ctx.clone())?;
- let file_name = copy_table.file_name().to_string();
-
+                        table_idents_to_full_name(&copy_table.table_name, query_ctx.clone())?;
+ let file_name = copy_table.file_name;
let req = CopyTableRequest {
catalog_name,
schema_name,
table_name,
file_name,
+ connection: copy_table.connection,
};
self.sql_handler
diff --git a/src/datanode/src/sql/copy_table.rs b/src/datanode/src/sql/copy_table.rs
index 5b401649d413..8acd9447ae32 100644
--- a/src/datanode/src/sql/copy_table.rs
+++ b/src/datanode/src/sql/copy_table.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::HashMap;
use std::pin::Pin;
use common_query::physical_plan::SessionContext;
@@ -22,16 +23,54 @@ use datafusion::parquet::basic::{Compression, Encoding};
use datafusion::parquet::file::properties::WriterProperties;
use datafusion::physical_plan::RecordBatchStream;
use futures::TryStreamExt;
-use object_store::services::Fs as Builder;
-use object_store::{ObjectStore, ObjectStoreBuilder};
+use object_store::ObjectStore;
use snafu::ResultExt;
use table::engine::TableReference;
use table::requests::CopyTableRequest;
+use url::{ParseError, Url};
+use super::copy_table_from::{build_fs_backend, build_s3_backend, S3_SCHEMA};
use crate::error::{self, Result};
use crate::sql::SqlHandler;
impl SqlHandler {
+ fn build_backend(
+ &self,
+ url: &str,
+ connection: HashMap<String, String>,
+ ) -> Result<(ObjectStore, String)> {
+ let result = Url::parse(url);
+
+ match result {
+ Ok(url) => {
+ let host = url.host_str();
+
+ let schema = url.scheme();
+
+ let path = url.path();
+
+ match schema.to_uppercase().as_str() {
+ S3_SCHEMA => {
+ let object_store = build_s3_backend(host, "/", connection)?;
+ Ok((object_store, path.to_string()))
+ }
+
+ _ => error::UnsupportedBackendProtocolSnafu {
+ protocol: schema.to_string(),
+ }
+ .fail(),
+ }
+ }
+ Err(ParseError::RelativeUrlWithoutBase) => {
+ let object_store = build_fs_backend("/")?;
+ Ok((object_store, url.to_string()))
+ }
+ Err(err) => Err(error::Error::InvalidUrl {
+ url: url.to_string(),
+ source: err,
+ }),
+ }
+ }
pub(crate) async fn copy_table(&self, req: CopyTableRequest) -> Result<Output> {
let table_ref = TableReference {
catalog: &req.catalog_name,
@@ -52,13 +91,9 @@ impl SqlHandler {
.context(error::TableScanExecSnafu)?;
let stream = Box::pin(DfRecordBatchStreamAdapter::new(stream));
- let accessor = Builder::default()
- .root("/")
- .build()
- .context(error::BuildBackendSnafu)?;
- let object_store = ObjectStore::new(accessor).finish();
+ let (object_store, file_name) = self.build_backend(&req.file_name, req.connection)?;
- let mut parquet_writer = ParquetWriter::new(req.file_name, stream, object_store);
+ let mut parquet_writer = ParquetWriter::new(file_name, stream, object_store);
// TODO(jiachun):
// For now, COPY is implemented synchronously.
// When copying large table, it will be blocked for a long time.
diff --git a/src/datanode/src/sql/copy_table_from.rs b/src/datanode/src/sql/copy_table_from.rs
index bedb36cf8e90..3c21e157692a 100644
--- a/src/datanode/src/sql/copy_table_from.rs
+++ b/src/datanode/src/sql/copy_table_from.rs
@@ -34,7 +34,7 @@ use url::{ParseError, Url};
use crate::error::{self, Result};
use crate::sql::SqlHandler;
-const S3_SCHEMA: &str = "S3";
+pub const S3_SCHEMA: &str = "S3";
const ENDPOINT_URL: &str = "ENDPOINT_URL";
const ACCESS_KEY_ID: &str = "ACCESS_KEY_ID";
const SECRET_ACCESS_KEY: &str = "SECRET_ACCESS_KEY";
@@ -165,13 +165,10 @@ impl DataSource {
Source::Dir
};
- let accessor = Fs::default()
- .root(&path)
- .build()
- .context(error::BuildBackendSnafu)?;
+ let object_store = build_fs_backend(&path)?;
Ok(DataSource {
- object_store: ObjectStore::new(accessor).finish(),
+ object_store,
source,
path,
regex,
@@ -184,59 +181,6 @@ impl DataSource {
}
}
- fn build_s3_backend(
- host: Option<&str>,
- path: &str,
- connection: HashMap<String, String>,
- ) -> Result<ObjectStore> {
- let mut builder = S3::default();
-
- builder.root(path);
-
- if let Some(bucket) = host {
- builder.bucket(bucket);
- }
-
- if let Some(endpoint) = connection.get(ENDPOINT_URL) {
- builder.endpoint(endpoint);
- }
-
- if let Some(region) = connection.get(REGION) {
- builder.region(region);
- }
-
- if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
- builder.access_key_id(key_id);
- }
-
- if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
- builder.secret_access_key(key);
- }
-
- if let Some(session_token) = connection.get(SESSION_TOKEN) {
- builder.security_token(session_token);
- }
-
- if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
- let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
- error::InvalidConnectionSnafu {
- msg: format!(
- "failed to parse the option {}={}, {}",
- ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
- ),
- }
- .build()
- })?;
- if enable {
- builder.enable_virtual_host_style();
- }
- }
-
- let accessor = builder.build().context(error::BuildBackendSnafu)?;
-
- Ok(ObjectStore::new(accessor).finish())
- }
-
fn from_url(
url: Url,
regex: Option<Regex>,
@@ -257,7 +201,7 @@ impl DataSource {
};
let object_store = match schema.to_uppercase().as_str() {
- S3_SCHEMA => DataSource::build_s3_backend(host, &dir, connection)?,
+ S3_SCHEMA => build_s3_backend(host, &dir, connection)?,
_ => {
return error::UnsupportedBackendProtocolSnafu {
protocol: schema.to_string(),
@@ -348,6 +292,68 @@ impl DataSource {
}
}
+pub fn build_s3_backend(
+ host: Option<&str>,
+ path: &str,
+ connection: HashMap<String, String>,
+) -> Result<ObjectStore> {
+ let mut builder = S3::default();
+
+ builder.root(path);
+
+ if let Some(bucket) = host {
+ builder.bucket(bucket);
+ }
+
+ if let Some(endpoint) = connection.get(ENDPOINT_URL) {
+ builder.endpoint(endpoint);
+ }
+
+ if let Some(region) = connection.get(REGION) {
+ builder.region(region);
+ }
+
+ if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
+ builder.access_key_id(key_id);
+ }
+
+ if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
+ builder.secret_access_key(key);
+ }
+
+ if let Some(session_token) = connection.get(SESSION_TOKEN) {
+ builder.security_token(session_token);
+ }
+
+ if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
+ let enable = enable_str.as_str().parse::<bool>().map_err(|e| {
+ error::InvalidConnectionSnafu {
+ msg: format!(
+ "failed to parse the option {}={}, {}",
+ ENABLE_VIRTUAL_HOST_STYLE, enable_str, e
+ ),
+ }
+ .build()
+ })?;
+ if enable {
+ builder.enable_virtual_host_style();
+ }
+ }
+
+ let accessor = builder.build().context(error::BuildBackendSnafu)?;
+
+ Ok(ObjectStore::new(accessor).finish())
+}
+
+pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
+ let accessor = Fs::default()
+ .root(root)
+ .build()
+ .context(error::BuildBackendSnafu)?;
+
+ Ok(ObjectStore::new(accessor).finish())
+}
+
#[cfg(test)]
mod tests {
diff --git a/src/datanode/src/tests/instance_test.rs b/src/datanode/src/tests/instance_test.rs
index 9ec4682b89a6..1fca1d977542 100644
--- a/src/datanode/src/tests/instance_test.rs
+++ b/src/datanode/src/tests/instance_test.rs
@@ -12,11 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::env;
use std::sync::Arc;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_query::Output;
use common_recordbatch::util;
+use common_telemetry::logging;
use datatypes::data_type::ConcreteDataType;
use datatypes::vectors::{Int64Vector, StringVector, UInt64Vector, VectorRef};
use query::parser::{QueryLanguageParser, QueryStatement};
@@ -797,6 +799,45 @@ async fn test_execute_copy_to() {
assert!(matches!(output, Output::AffectedRows(2)));
}
+#[tokio::test(flavor = "multi_thread")]
+async fn test_execute_copy_to_s3() {
+ logging::init_default_ut_logging();
+ if let Ok(bucket) = env::var("GT_S3_BUCKET") {
+ if !bucket.is_empty() {
+ let instance = setup_test_instance("test_execute_copy_to_s3").await;
+
+ // setups
+ execute_sql(
+ &instance,
+ "create table demo(host string, cpu double, memory double, ts timestamp time index);",
+ )
+ .await;
+
+ let output = execute_sql(
+ &instance,
+ r#"insert into demo(host, cpu, memory, ts) values
+ ('host1', 66.6, 1024, 1655276557000),
+ ('host2', 88.8, 333.3, 1655276558000)
+ "#,
+ )
+ .await;
+ assert!(matches!(output, Output::AffectedRows(2)));
+ let key_id = env::var("GT_S3_ACCESS_KEY_ID").unwrap();
+ let key = env::var("GT_S3_ACCESS_KEY").unwrap();
+ let url =
+ env::var("GT_S3_ENDPOINT_URL").unwrap_or("https://s3.amazonaws.com".to_string());
+
+ let root = uuid::Uuid::new_v4().to_string();
+
+ // exports
+ let copy_to_stmt = format!("Copy demo TO 's3://{}/{}/export/demo.parquet' CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',ENDPOINT_URL='{}')", bucket, root, key_id, key, url);
+
+ let output = execute_sql(&instance, ©_to_stmt).await;
+ assert!(matches!(output, Output::AffectedRows(2)));
+ }
+ }
+}
+
#[tokio::test(flavor = "multi_thread")]
async fn test_execute_copy_from() {
let instance = setup_test_instance("test_execute_copy_from").await;
@@ -882,6 +923,106 @@ async fn test_execute_copy_from() {
}
}
+#[tokio::test(flavor = "multi_thread")]
+async fn test_execute_copy_from_s3() {
+ logging::init_default_ut_logging();
+ if let Ok(bucket) = env::var("GT_S3_BUCKET") {
+ if !bucket.is_empty() {
+ let instance = setup_test_instance("test_execute_copy_from_s3").await;
+
+ // setups
+ execute_sql(
+ &instance,
+ "create table demo(host string, cpu double, memory double, ts timestamp time index);",
+ )
+ .await;
+
+ let output = execute_sql(
+ &instance,
+ r#"insert into demo(host, cpu, memory, ts) values
+ ('host1', 66.6, 1024, 1655276557000),
+ ('host2', 88.8, 333.3, 1655276558000)
+ "#,
+ )
+ .await;
+ assert!(matches!(output, Output::AffectedRows(2)));
+
+ // export
+ let root = uuid::Uuid::new_v4().to_string();
+ let key_id = env::var("GT_S3_ACCESS_KEY_ID").unwrap();
+ let key = env::var("GT_S3_ACCESS_KEY").unwrap();
+ let url =
+ env::var("GT_S3_ENDPOINT_URL").unwrap_or("https://s3.amazonaws.com".to_string());
+
+ let copy_to_stmt = format!("Copy demo TO 's3://{}/{}/export/demo.parquet' CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',ENDPOINT_URL='{}')", bucket, root, key_id, key, url);
+ logging::info!("Copy table to s3: {}", copy_to_stmt);
+
+ let output = execute_sql(&instance, ©_to_stmt).await;
+ assert!(matches!(output, Output::AffectedRows(2)));
+
+ struct Test<'a> {
+ sql: &'a str,
+ table_name: &'a str,
+ }
+ let tests = [
+ Test {
+ sql: &format!(
+ "Copy with_filename FROM 's3://{}/{}/export/demo.parquet_1_2'",
+ bucket, root
+ ),
+ table_name: "with_filename",
+ },
+ Test {
+ sql: &format!("Copy with_path FROM 's3://{}/{}/export/'", bucket, root),
+ table_name: "with_path",
+ },
+ Test {
+ sql: &format!(
+ "Copy with_pattern FROM 's3://{}/{}/export/' WITH (PATTERN = 'demo.*')",
+ bucket, root
+ ),
+ table_name: "with_pattern",
+ },
+ ];
+
+ for test in tests {
+ // import
+ execute_sql(
+ &instance,
+ &format!(
+ "create table {}(host string, cpu double, memory double, ts timestamp time index);",
+ test.table_name
+ ),
+ )
+ .await;
+ let sql = format!(
+ "{} CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',ENDPOINT_URL='{}')",
+ test.sql, key_id, key, url
+ );
+ logging::info!("Running sql: {}", sql);
+
+ let output = execute_sql(&instance, &sql).await;
+ assert!(matches!(output, Output::AffectedRows(2)));
+
+ let output = execute_sql(
+ &instance,
+ &format!("select * from {} order by ts", test.table_name),
+ )
+ .await;
+ let expected = "\
++-------+------+--------+---------------------+
+| host | cpu | memory | ts |
++-------+------+--------+---------------------+
+| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
+| host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 |
++-------+------+--------+---------------------+"
+ .to_string();
+ check_output_stream(output, expected).await;
+ }
+ }
+ }
+}
+
#[tokio::test(flavor = "multi_thread")]
async fn test_create_by_procedure() {
common_telemetry::init_default_ut_logging();
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 2ccb4fa48aa2..2d1b2264a1d9 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -677,7 +677,7 @@ pub fn check_permission(
validate_param(delete.table_name(), query_ctx)?;
}
Statement::Copy(stmd) => match stmd {
- CopyTable::To(copy_table_to) => validate_param(copy_table_to.table_name(), query_ctx)?,
+ CopyTable::To(copy_table_to) => validate_param(&copy_table_to.table_name, query_ctx)?,
CopyTable::From(copy_table_from) => {
validate_param(&copy_table_from.table_name, query_ctx)?
}
diff --git a/src/sql/src/parsers/copy_parser.rs b/src/sql/src/parsers/copy_parser.rs
index 8bdac09243a5..518056dea87f 100644
--- a/src/sql/src/parsers/copy_parser.rs
+++ b/src/sql/src/parsers/copy_parser.rs
@@ -130,8 +130,24 @@ impl<'a> ParserContext<'a> {
}
}
+ let connection_options = self
+ .parser
+ .parse_options(Keyword::CONNECTION)
+ .context(error::SyntaxSnafu { sql: self.sql })?;
+
+ let connection = connection_options
+ .into_iter()
+ .filter_map(|option| {
+ if let Some(v) = ParserContext::parse_option_string(option.value) {
+ Some((option.name.value.to_uppercase(), v))
+ } else {
+ None
+ }
+ })
+ .collect();
+
Ok(CopyTable::To(CopyTableTo::new(
- table_name, file_name, format,
+ table_name, file_name, format, connection,
)))
}
@@ -167,7 +183,7 @@ mod tests {
match statement {
Statement::Copy(CopyTable::To(copy_table)) => {
let (catalog, schema, table) =
+ if let [catalog, schema, table] = &copy_table.table_name.0[..] {
+ if let [catalog, schema, table] = ©_table.table_name.0[..] {
(
catalog.value.clone(),
schema.value.clone(),
@@ -181,11 +197,11 @@ mod tests {
assert_eq!("schema0", schema);
assert_eq!("tbl", table);
- let file_name = copy_table.file_name();
+ let file_name = copy_table.file_name;
assert_eq!("tbl_file.parquet", file_name);
- let format = copy_table.format();
- assert_eq!(Format::Parquet, *format);
+ let format = copy_table.format;
+ assert_eq!(Format::Parquet, format);
}
_ => unreachable!(),
}
@@ -275,6 +291,44 @@ mod tests {
}
}
+ #[test]
+ fn test_parse_copy_table_to() {
+ struct Test<'a> {
+ sql: &'a str,
+ expected_connection: HashMap<String, String>,
+ }
+
+ let tests = [
+ Test {
+ sql: "COPY catalog0.schema0.tbl TO 'tbl_file.parquet' ",
+ expected_connection: HashMap::new(),
+ },
+ Test {
+ sql: "COPY catalog0.schema0.tbl TO 'tbl_file.parquet' CONNECTION (FOO='Bar', ONE='two')",
+ expected_connection: [("FOO","Bar"),("ONE","two")].into_iter().map(|(k,v)|{(k.to_string(),v.to_string())}).collect()
+ },
+ Test {
+ sql:"COPY catalog0.schema0.tbl TO 'tbl_file.parquet' WITH (FORMAT = 'parquet') CONNECTION (FOO='Bar', ONE='two')",
+ expected_connection: [("FOO","Bar"),("ONE","two")].into_iter().map(|(k,v)|{(k.to_string(),v.to_string())}).collect()
+ },
+ ];
+
+ for test in tests {
+ let mut result =
+ ParserContext::create_with_dialect(test.sql, &GenericDialect {}).unwrap();
+ assert_eq!(1, result.len());
+
+ let statement = result.remove(0);
+ assert_matches!(statement, Statement::Copy { .. });
+ match statement {
+ Statement::Copy(CopyTable::To(copy_table)) => {
+ assert_eq!(copy_table.connection.clone(), test.expected_connection);
+ }
+ _ => unreachable!(),
+ }
+ }
+ }
+
#[test]
fn test_parse_copy_table_with_unsupopoted_format() {
let results = [
diff --git a/src/sql/src/statements/copy.rs b/src/sql/src/statements/copy.rs
index 140e0babde01..e2c3862a1a30 100644
--- a/src/sql/src/statements/copy.rs
+++ b/src/sql/src/statements/copy.rs
@@ -26,31 +26,26 @@ pub enum CopyTable {
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CopyTableTo {
- table_name: ObjectName,
- file_name: String,
- format: Format,
+ pub table_name: ObjectName,
+ pub file_name: String,
+ pub format: Format,
+ pub connection: HashMap<String, String>,
}
impl CopyTableTo {
- pub(crate) fn new(table_name: ObjectName, file_name: String, format: Format) -> Self {
+ pub(crate) fn new(
+ table_name: ObjectName,
+ file_name: String,
+ format: Format,
+ connection: HashMap<String, String>,
+ ) -> Self {
Self {
table_name,
file_name,
format,
+ connection,
}
}
-
- pub fn table_name(&self) -> &ObjectName {
- &self.table_name
- }
-
- pub fn file_name(&self) -> &str {
- &self.file_name
- }
-
- pub fn format(&self) -> &Format {
- &self.format
- }
}
// TODO: Combine struct CopyTableFrom and CopyTableTo
diff --git a/src/table/src/requests.rs b/src/table/src/requests.rs
index 76fff8a986c6..b09e47446fa2 100644
--- a/src/table/src/requests.rs
+++ b/src/table/src/requests.rs
@@ -197,6 +197,7 @@ pub struct CopyTableRequest {
pub schema_name: String,
pub table_name: String,
pub file_name: String,
+ pub connection: HashMap<String, String>,
}
#[derive(Debug)]
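A minimal standalone sketch (hypothetical helper name, not the parser's actual code) of the normalization the new CONNECTION handling performs: option names are uppercased and collected into the HashMap that the COPY statement now carries.

use std::collections::HashMap;

/// Collects CONNECTION options into an uppercased key/value map, mirroring the
/// normalization applied before the options are stored on the COPY statement.
fn normalize_connection(options: &[(&str, &str)]) -> HashMap<String, String> {
    options
        .iter()
        .map(|(key, value)| (key.to_uppercase(), value.to_string()))
        .collect()
}

fn main() {
    let connection = normalize_connection(&[
        ("access_key_id", "my-key"),
        ("secret_access_key", "my-secret"),
        ("endpoint_url", "https://s3.amazonaws.com"),
    ]);
    assert_eq!(connection["ACCESS_KEY_ID"], "my-key");
    assert_eq!(connection["ENDPOINT_URL"], "https://s3.amazonaws.com");
}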
|
feat
|
implement CONNECTION clause of Copy To (#1163)
|
3d0d082c5672945b028872cf790a1f82e66f82af
|
2023-07-05 08:32:12
|
liyang
|
refactor: release push binary (#1883)
| false
|
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 7fd8f861b5c4..0fcd941f10db 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -136,7 +136,7 @@ jobs:
- name: Upload to S3
run: |
- aws s3 sync target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }} s3://${{ secrets.GREPTIMEDB_RELEASE_BUCKET_NAME }}/releases/${TAG}
+ aws s3 cp target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }} s3://${{ secrets.GREPTIMEDB_RELEASE_BUCKET_NAME }}/releases/${TAG} --recursive --exclude "*" --include "*.tgz"
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
@@ -312,7 +312,7 @@ jobs:
- name: Upload to S3
run: |
- aws s3 sync target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }} s3://${{ secrets.GREPTIMEDB_RELEASE_BUCKET_NAME }}/releases/${TAG}
+ aws s3 cp target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }} s3://${{ secrets.GREPTIMEDB_RELEASE_BUCKET_NAME }}/releases/${TAG} --recursive --exclude "*" --include "*.tgz"
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
refactor
|
release push binary (#1883)
|
c56f5e39cda71f85a3699f48e2bc3e658884177d
|
2023-08-26 13:11:15
|
Weny Xu
|
refactor: set default metasrv procedure retry times to 12 (#2242)
| false
|
diff --git a/config/metasrv.example.toml b/config/metasrv.example.toml
index 647d98c0ec3e..b1facf851871 100644
--- a/config/metasrv.example.toml
+++ b/config/metasrv.example.toml
@@ -26,7 +26,7 @@ enable_telemetry = true
# Procedure storage options.
[procedure]
# Procedure max retry time.
-max_retry_times = 3
+max_retry_times = 12
# Initial retry delay of procedures, increases exponentially
retry_delay = "500ms"
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 2afc782dac80..ca3d67dbc457 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -16,6 +16,7 @@ pub mod builder;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
+use std::time::Duration;
use api::v1::meta::Peer;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
@@ -79,7 +80,10 @@ impl Default for MetaSrvOptions {
dir: format!("{METASRV_HOME}/logs"),
..Default::default()
},
- procedure: ProcedureConfig::default(),
+ procedure: ProcedureConfig {
+ max_retry_times: 12,
+ retry_delay: Duration::from_millis(500),
+ },
datanode: DatanodeOptions::default(),
enable_telemetry: true,
data_home: METASRV_HOME.to_string(),
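A rough standalone sketch of why the default above moves from 3 to 12 retries: assuming the delay doubles on each attempt starting from the configured 500ms (the config comment only says it increases exponentially, so the factor of two is an assumption), the total retry window grows from a few seconds to over half an hour.

use std::time::Duration;

/// Total time spent waiting between attempts, assuming the delay doubles on
/// every retry (the doubling factor is an assumption for illustration).
fn total_backoff(initial: Duration, max_retry_times: u32) -> Duration {
    (0..max_retry_times).map(|attempt| initial * 2u32.pow(attempt)).sum()
}

fn main() {
    let initial = Duration::from_millis(500);
    println!("3 retries wait ~{:?}", total_backoff(initial, 3));   // ~3.5s
    println!("12 retries wait ~{:?}", total_backoff(initial, 12)); // ~34min
}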
|
refactor
|
set default metasrv procedure retry times to 12 (#2242)
|
8ca9e014557fe3ca413a5e7ef0913620648b7729
|
2024-03-14 16:43:01
|
Yingwen
|
feat: Partition memtables by time if compaction window is provided (#3501)
| false
|
diff --git a/src/mito2/src/memtable.rs b/src/mito2/src/memtable.rs
index aa3d7e2bed71..8c9cd0172a0c 100644
--- a/src/mito2/src/memtable.rs
+++ b/src/mito2/src/memtable.rs
@@ -26,6 +26,7 @@ use table::predicate::Predicate;
use crate::error::Result;
use crate::flush::WriteBufferManagerRef;
+use crate::memtable::key_values::KeyValue;
pub use crate::memtable::key_values::KeyValues;
use crate::memtable::merge_tree::MergeTreeConfig;
use crate::metrics::WRITE_BUFFER_BYTES;
@@ -33,6 +34,7 @@ use crate::read::Batch;
pub mod key_values;
pub mod merge_tree;
+pub mod time_partition;
pub mod time_series;
pub(crate) mod version;
@@ -82,9 +84,12 @@ pub trait Memtable: Send + Sync + fmt::Debug {
/// Returns the id of this memtable.
fn id(&self) -> MemtableId;
- /// Write key values into the memtable.
+ /// Writes key values into the memtable.
fn write(&self, kvs: &KeyValues) -> Result<()>;
+ /// Writes one key value pair into the memtable.
+ fn write_one(&self, key_value: KeyValue) -> Result<()>;
+
/// Scans the memtable.
/// `projection` selects columns to read, `None` means reading all columns.
/// `filters` are the predicates to be pushed down to memtable.
diff --git a/src/mito2/src/memtable/key_values.rs b/src/mito2/src/memtable/key_values.rs
index 4986a81cb2c1..f1734e5a36e0 100644
--- a/src/mito2/src/memtable/key_values.rs
+++ b/src/mito2/src/memtable/key_values.rs
@@ -71,7 +71,7 @@ impl KeyValues {
/// Primary key columns have the same order as region's primary key. Field
/// columns are ordered by their position in the region schema (The same order
/// as users defined while creating the region).
-#[derive(Debug)]
+#[derive(Debug, Clone, Copy)]
pub struct KeyValue<'a> {
row: &'a Row,
schema: &'a Vec<ColumnSchema>,
diff --git a/src/mito2/src/memtable/merge_tree.rs b/src/mito2/src/memtable/merge_tree.rs
index 1789959adfee..a916f4f9b496 100644
--- a/src/mito2/src/memtable/merge_tree.rs
+++ b/src/mito2/src/memtable/merge_tree.rs
@@ -36,6 +36,7 @@ use table::predicate::Predicate;
use crate::error::Result;
use crate::flush::WriteBufferManagerRef;
+use crate::memtable::key_values::KeyValue;
use crate::memtable::merge_tree::metrics::WriteMetrics;
use crate::memtable::merge_tree::tree::MergeTree;
use crate::memtable::{
@@ -127,6 +128,17 @@ impl Memtable for MergeTreeMemtable {
res
}
+ fn write_one(&self, key_value: KeyValue) -> Result<()> {
+ let mut metrics = WriteMetrics::default();
+ let mut pk_buffer = Vec::new();
+ // Ensures the memtable always updates stats.
+ let res = self.tree.write_one(key_value, &mut pk_buffer, &mut metrics);
+
+ self.update_stats(&metrics);
+
+ res
+ }
+
fn iter(
&self,
projection: Option<&[ColumnId]>,
@@ -290,16 +302,14 @@ impl MemtableBuilder for MergeTreeMemtableBuilder {
#[cfg(test)]
mod tests {
- use std::collections::BTreeSet;
-
use common_time::Timestamp;
use datafusion_common::{Column, ScalarValue};
use datafusion_expr::{BinaryExpr, Expr, Operator};
use datatypes::scalars::ScalarVector;
- use datatypes::vectors::{Int64Vector, TimestampMillisecondVector};
+ use datatypes::vectors::Int64Vector;
use super::*;
- use crate::test_util::memtable_util;
+ use crate::test_util::memtable_util::{self, collect_iter_timestamps};
#[test]
fn test_memtable_sorted_input() {
@@ -322,23 +332,10 @@ mod tests {
let expected_ts = kvs
.iter()
.map(|kv| kv.timestamp().as_timestamp().unwrap().unwrap().value())
- .collect::<BTreeSet<_>>();
+ .collect::<Vec<_>>();
let iter = memtable.iter(None, None).unwrap();
- let read = iter
- .flat_map(|batch| {
- batch
- .unwrap()
- .timestamps()
- .as_any()
- .downcast_ref::<TimestampMillisecondVector>()
- .unwrap()
- .iter_data()
- .collect::<Vec<_>>()
- .into_iter()
- })
- .map(|v| v.unwrap().0.value())
- .collect::<BTreeSet<_>>();
+ let read = collect_iter_timestamps(iter);
assert_eq!(expected_ts, read);
let stats = memtable.stats();
@@ -386,20 +383,7 @@ mod tests {
memtable.write(&kvs).unwrap();
let iter = memtable.iter(None, None).unwrap();
- let read = iter
- .flat_map(|batch| {
- batch
- .unwrap()
- .timestamps()
- .as_any()
- .downcast_ref::<TimestampMillisecondVector>()
- .unwrap()
- .iter_data()
- .collect::<Vec<_>>()
- .into_iter()
- })
- .map(|v| v.unwrap().0.value())
- .collect::<Vec<_>>();
+ let read = collect_iter_timestamps(iter);
assert_eq!(vec![0, 1, 2, 3, 4, 5, 6, 7], read);
let iter = memtable.iter(None, None).unwrap();
@@ -514,20 +498,7 @@ mod tests {
let expect = data.into_iter().map(|x| x.2).collect::<Vec<_>>();
let iter = memtable.iter(None, None).unwrap();
- let read = iter
- .flat_map(|batch| {
- batch
- .unwrap()
- .timestamps()
- .as_any()
- .downcast_ref::<TimestampMillisecondVector>()
- .unwrap()
- .iter_data()
- .collect::<Vec<_>>()
- .into_iter()
- })
- .map(|v| v.unwrap().0.value())
- .collect::<Vec<_>>();
+ let read = collect_iter_timestamps(iter);
assert_eq!(expect, read);
}
@@ -564,20 +535,7 @@ mod tests {
let iter = memtable
.iter(None, Some(Predicate::new(vec![expr.into()])))
.unwrap();
- let read = iter
- .flat_map(|batch| {
- batch
- .unwrap()
- .timestamps()
- .as_any()
- .downcast_ref::<TimestampMillisecondVector>()
- .unwrap()
- .iter_data()
- .collect::<Vec<_>>()
- .into_iter()
- })
- .map(|v| v.unwrap().0.value())
- .collect::<Vec<_>>();
+ let read = collect_iter_timestamps(iter);
assert_eq!(timestamps, read);
}
}
diff --git a/src/mito2/src/memtable/merge_tree/tree.rs b/src/mito2/src/memtable/merge_tree/tree.rs
index 0a42e13fdec3..a059643dd478 100644
--- a/src/mito2/src/memtable/merge_tree/tree.rs
+++ b/src/mito2/src/memtable/merge_tree/tree.rs
@@ -148,6 +148,54 @@ impl MergeTree {
Ok(())
}
+ /// Write one key value pair into the tree.
+ ///
+ /// # Panics
+ /// Panics if the tree is immutable (frozen).
+ pub fn write_one(
+ &self,
+ kv: KeyValue,
+ pk_buffer: &mut Vec<u8>,
+ metrics: &mut WriteMetrics,
+ ) -> Result<()> {
+ let has_pk = !self.metadata.primary_key.is_empty();
+
+ ensure!(
+ kv.num_primary_keys() == self.row_codec.num_fields(),
+ PrimaryKeyLengthMismatchSnafu {
+ expect: self.row_codec.num_fields(),
+ actual: kv.num_primary_keys(),
+ }
+ );
+ // Safety: timestamp of kv must be both present and a valid timestamp value.
+ let ts = kv.timestamp().as_timestamp().unwrap().unwrap().value();
+ metrics.min_ts = metrics.min_ts.min(ts);
+ metrics.max_ts = metrics.max_ts.max(ts);
+ metrics.value_bytes += kv.fields().map(|v| v.data_size()).sum::<usize>();
+
+ if !has_pk {
+ // No primary key.
+ return self.write_no_key(kv);
+ }
+
+ // Encode primary key.
+ pk_buffer.clear();
+ if self.is_partitioned {
+ // Use sparse encoder for metric engine.
+ self.sparse_encoder
+ .encode_to_vec(kv.primary_keys(), pk_buffer)?;
+ } else {
+ self.row_codec.encode_to_vec(kv.primary_keys(), pk_buffer)?;
+ }
+
+ // Write rows with the encoded primary key.
+ self.write_with_key(pk_buffer, kv, metrics)?;
+
+ metrics.value_bytes += std::mem::size_of::<Timestamp>() + std::mem::size_of::<OpType>();
+
+ Ok(())
+ }
+
/// Scans the tree.
pub fn read(
&self,
diff --git a/src/mito2/src/memtable/time_partition.rs b/src/mito2/src/memtable/time_partition.rs
new file mode 100644
index 000000000000..cba3ba3079c7
--- /dev/null
+++ b/src/mito2/src/memtable/time_partition.rs
@@ -0,0 +1,551 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Partitions memtables by time.
+
+use std::collections::HashMap;
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+
+use common_telemetry::debug;
+use common_time::timestamp::TimeUnit;
+use common_time::timestamp_millis::BucketAligned;
+use common_time::Timestamp;
+use smallvec::{smallvec, SmallVec};
+use snafu::OptionExt;
+use store_api::metadata::RegionMetadataRef;
+
+use crate::error::{InvalidRequestSnafu, Result};
+use crate::memtable::key_values::KeyValue;
+use crate::memtable::version::SmallMemtableVec;
+use crate::memtable::{KeyValues, MemtableBuilderRef, MemtableId, MemtableRef};
+
+/// A partition holds rows with timestamps between `[min, max)`.
+#[derive(Debug, Clone)]
+pub struct TimePartition {
+ /// Memtable of the partition.
+ memtable: MemtableRef,
+ /// Time range of the partition. `None` means there is no time range. The time
+ /// range is `None` if and only if the [TimePartitions::part_duration] is `None`.
+ time_range: Option<PartTimeRange>,
+}
+
+impl TimePartition {
+ /// Returns whether the `ts` belongs to the partition.
+ fn contains_timestamp(&self, ts: Timestamp) -> bool {
+ let Some(range) = self.time_range else {
+ return true;
+ };
+
+ range.contains_timestamp(ts)
+ }
+
+ /// Write rows to the part.
+ fn write(&self, kvs: &KeyValues) -> Result<()> {
+ self.memtable.write(kvs)
+ }
+}
+
+type PartitionVec = SmallVec<[TimePartition; 2]>;
+
+/// Partitions.
+#[derive(Debug)]
+pub struct TimePartitions {
+ /// Mutable data of partitions.
+ inner: Mutex<PartitionsInner>,
+ /// Duration of a partition.
+ ///
+ /// `None` means there is only one partition and the [TimePartition::time_range] is
+ /// also `None`.
+ part_duration: Option<Duration>,
+ /// Metadata of the region.
+ metadata: RegionMetadataRef,
+ /// Builder of memtables.
+ builder: MemtableBuilderRef,
+}
+
+pub type TimePartitionsRef = Arc<TimePartitions>;
+
+impl TimePartitions {
+ /// Returns a new empty partition list with optional duration.
+ pub fn new(
+ metadata: RegionMetadataRef,
+ builder: MemtableBuilderRef,
+ next_memtable_id: MemtableId,
+ part_duration: Option<Duration>,
+ ) -> Self {
+ let mut inner = PartitionsInner::new(next_memtable_id);
+ if part_duration.is_none() {
+ // If `part_duration` is None, then we create a partition with `None` time
+ // range so we will write all rows to that partition.
+ let memtable = builder.build(inner.alloc_memtable_id(), &metadata);
+ debug!(
+ "Creates a time partition for all timestamps, region: {}, memtable_id: {}",
+ metadata.region_id,
+ memtable.id(),
+ );
+ let part = TimePartition {
+ memtable,
+ time_range: None,
+ };
+ inner.parts.push(part);
+ }
+
+ Self {
+ inner: Mutex::new(inner),
+ part_duration,
+ metadata,
+ builder,
+ }
+ }
+
+ /// Write key values to memtables.
+ ///
+ /// It creates new partitions if necessary.
+ pub fn write(&self, kvs: &KeyValues) -> Result<()> {
+ // Get all parts.
+ let parts = self.list_partitions();
+
+ // Checks whether all rows belong to a single part. Checks in reverse order as we usually
+ // write to the latest part.
+ for part in parts.iter().rev() {
+ let mut all_in_partition = true;
+ for kv in kvs.iter() {
+ // Safety: We checked the schema in the write request.
+ let ts = kv.timestamp().as_timestamp().unwrap().unwrap();
+ if !part.contains_timestamp(ts) {
+ all_in_partition = false;
+ break;
+ }
+ }
+ if !all_in_partition {
+ continue;
+ }
+
+ // We can write all rows to this part.
+ return part.write(kvs);
+ }
+
+ // Slow path: We have to split kvs by partitions.
+ self.write_multi_parts(kvs, &parts)
+ }
+
+ /// Append memtables in partitions to `memtables`.
+ pub fn list_memtables(&self, memtables: &mut Vec<MemtableRef>) {
+ let inner = self.inner.lock().unwrap();
+ memtables.extend(inner.parts.iter().map(|part| part.memtable.clone()));
+ }
+
+ /// Returns the number of partitions.
+ pub fn num_partitions(&self) -> usize {
+ let inner = self.inner.lock().unwrap();
+ inner.parts.len()
+ }
+
+ /// Returns true if all memtables are empty.
+ pub fn is_empty(&self) -> bool {
+ let inner = self.inner.lock().unwrap();
+ inner.parts.iter().all(|part| part.memtable.is_empty())
+ }
+
+ /// Freezes all memtables.
+ pub fn freeze(&self) -> Result<()> {
+ let inner = self.inner.lock().unwrap();
+ for part in &*inner.parts {
+ part.memtable.freeze()?;
+ }
+ Ok(())
+ }
+
+ /// Forks latest partition.
+ pub fn fork(&self, metadata: &RegionMetadataRef) -> Self {
+ let mut inner = self.inner.lock().unwrap();
+ let latest_part = inner
+ .parts
+ .iter()
+ .max_by_key(|part| part.time_range.map(|range| range.min_timestamp))
+ .cloned();
+
+ let Some(old_part) = latest_part else {
+ return Self::new(
+ metadata.clone(),
+ self.builder.clone(),
+ inner.next_memtable_id,
+ self.part_duration,
+ );
+ };
+ let memtable = old_part.memtable.fork(inner.alloc_memtable_id(), metadata);
+ let new_part = TimePartition {
+ memtable,
+ time_range: old_part.time_range,
+ };
+ Self {
+ inner: Mutex::new(PartitionsInner::with_partition(
+ new_part,
+ inner.next_memtable_id,
+ )),
+ part_duration: self.part_duration,
+ metadata: metadata.clone(),
+ builder: self.builder.clone(),
+ }
+ }
+
+ /// Returns partition duration.
+ pub(crate) fn part_duration(&self) -> Option<Duration> {
+ self.part_duration
+ }
+
+ /// Returns memory usage.
+ pub(crate) fn memory_usage(&self) -> usize {
+ let inner = self.inner.lock().unwrap();
+ inner
+ .parts
+ .iter()
+ .map(|part| part.memtable.stats().estimated_bytes)
+ .sum()
+ }
+
+ /// Append memtables in partitions to small vec.
+ pub(crate) fn list_memtables_to_small_vec(&self, memtables: &mut SmallMemtableVec) {
+ let inner = self.inner.lock().unwrap();
+ memtables.extend(inner.parts.iter().map(|part| part.memtable.clone()));
+ }
+
+ /// Returns the next memtable id.
+ pub(crate) fn next_memtable_id(&self) -> MemtableId {
+ let inner = self.inner.lock().unwrap();
+ inner.next_memtable_id
+ }
+
+ /// Returns all partitions.
+ fn list_partitions(&self) -> PartitionVec {
+ let inner = self.inner.lock().unwrap();
+ inner.parts.clone()
+ }
+
+ /// Write to multiple partitions.
+ fn write_multi_parts(&self, kvs: &KeyValues, parts: &PartitionVec) -> Result<()> {
+ // If part duration is `None` then there is always one partition and all rows
+ // will be put in that partition before invoking this method.
+ debug_assert!(self.part_duration.is_some());
+
+ let mut parts_to_write = HashMap::new();
+ let mut missing_parts = HashMap::new();
+ for kv in kvs.iter() {
+ let mut part_found = false;
+ // Safety: We used the timestamp before.
+ let ts = kv.timestamp().as_timestamp().unwrap().unwrap();
+ for part in parts {
+ if part.contains_timestamp(ts) {
+ // Safety: Since part duration is `Some`, all time ranges should be `Some`.
+ parts_to_write
+ .entry(part.time_range.unwrap().min_timestamp)
+ .or_insert_with(|| PartitionToWrite {
+ partition: part.clone(),
+ key_values: Vec::new(),
+ })
+ .key_values
+ .push(kv);
+ part_found = true;
+ break;
+ }
+ }
+
+ if !part_found {
+ // We need to write it to a new part.
+ // Safety: `new()` ensures duration is always Some if we reach this method.
+ let part_duration = self.part_duration.unwrap();
+ let part_start =
+ partition_start_timestamp(ts, part_duration).with_context(|| {
+ InvalidRequestSnafu {
+ region_id: self.metadata.region_id,
+ reason: format!(
+ "timestamp {ts:?} and bucket {part_duration:?} are out of range"
+ ),
+ }
+ })?;
+ missing_parts
+ .entry(part_start)
+ .or_insert_with(Vec::new)
+ .push(kv);
+ }
+ }
+
+ // Writes rows to existing parts.
+ for part_to_write in parts_to_write.into_values() {
+ for kv in part_to_write.key_values {
+ part_to_write.partition.memtable.write_one(kv)?;
+ }
+ }
+
+ let part_duration = self.part_duration.unwrap();
+ // Creates new parts and writes to them. Acquires the lock to avoid other writers
+ // creating the same partition.
+ let mut inner = self.inner.lock().unwrap();
+ for (part_start, key_values) in missing_parts {
+ let part_pos = match inner
+ .parts
+ .iter()
+ .position(|part| part.time_range.unwrap().min_timestamp == part_start)
+ {
+ Some(pos) => pos,
+ None => {
+ let range = PartTimeRange::from_start_duration(part_start, part_duration)
+ .with_context(|| InvalidRequestSnafu {
+ region_id: self.metadata.region_id,
+ reason: format!(
+ "Partition time range for {part_start:?} is out of bound, bucket size: {part_duration:?}",
+ ),
+ })?;
+ let memtable = self
+ .builder
+ .build(inner.alloc_memtable_id(), &self.metadata);
+ debug!(
+ "Create time partition {:?} for region {}, duration: {:?}, memtable_id: {}, parts_total: {}",
+ range,
+ self.metadata.region_id,
+ part_duration,
+ memtable.id(),
+ inner.parts.len() + 1
+ );
+ let pos = inner.parts.len();
+ inner.parts.push(TimePartition {
+ memtable,
+ time_range: Some(range),
+ });
+ pos
+ }
+ };
+
+ let memtable = &inner.parts[part_pos].memtable;
+ for kv in key_values {
+ memtable.write_one(kv)?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+/// Computes the start timestamp of the partition for `ts`.
+///
+/// It always uses the bucket size in seconds, which should fit all timestamp resolutions.
+fn partition_start_timestamp(ts: Timestamp, bucket: Duration) -> Option<Timestamp> {
+ // Safety: We convert it to seconds so it never returns `None`.
+ let ts_sec = ts.convert_to(TimeUnit::Second).unwrap();
+ let bucket_sec: i64 = bucket.as_secs().try_into().ok()?;
+ let start_sec = ts_sec.align_by_bucket(bucket_sec)?;
+ start_sec.convert_to(ts.unit())
+}
+
+#[derive(Debug)]
+struct PartitionsInner {
+ /// All partitions.
+ parts: PartitionVec,
+ /// Next memtable id.
+ next_memtable_id: MemtableId,
+}
+
+impl PartitionsInner {
+ fn new(next_memtable_id: MemtableId) -> Self {
+ Self {
+ parts: Default::default(),
+ next_memtable_id,
+ }
+ }
+
+ fn with_partition(part: TimePartition, next_memtable_id: MemtableId) -> Self {
+ Self {
+ parts: smallvec![part],
+ next_memtable_id,
+ }
+ }
+
+ fn alloc_memtable_id(&mut self) -> MemtableId {
+ let id = self.next_memtable_id;
+ self.next_memtable_id += 1;
+ id
+ }
+}
+
+/// Time range of a partition.
+#[derive(Debug, Clone, Copy)]
+struct PartTimeRange {
+ /// Inclusive min timestamp of rows in the partition.
+ min_timestamp: Timestamp,
+ /// Exclusive max timestamp of rows in the partition.
+ max_timestamp: Timestamp,
+}
+
+impl PartTimeRange {
+ fn from_start_duration(start: Timestamp, duration: Duration) -> Option<Self> {
+ let start_sec = start.convert_to(TimeUnit::Second)?;
+ let end_sec = start_sec.add_duration(duration).ok()?;
+ let min_timestamp = start_sec.convert_to(start.unit())?;
+ let max_timestamp = end_sec.convert_to(start.unit())?;
+
+ Some(Self {
+ min_timestamp,
+ max_timestamp,
+ })
+ }
+
+ /// Returns whether the `ts` belongs to the partition.
+ fn contains_timestamp(&self, ts: Timestamp) -> bool {
+ self.min_timestamp <= ts && ts < self.max_timestamp
+ }
+}
+
+struct PartitionToWrite<'a> {
+ partition: TimePartition,
+ key_values: Vec<KeyValue<'a>>,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::memtable::merge_tree::MergeTreeMemtableBuilder;
+ use crate::test_util::memtable_util::{self, collect_iter_timestamps};
+
+ #[test]
+ fn test_no_duration() {
+ let metadata = memtable_util::metadata_for_test();
+ let builder = Arc::new(MergeTreeMemtableBuilder::default());
+ let partitions = TimePartitions::new(metadata.clone(), builder, 0, None);
+ assert_eq!(1, partitions.num_partitions());
+ assert!(partitions.is_empty());
+
+ let kvs = memtable_util::build_key_values(
+ &metadata,
+ "hello".to_string(),
+ 0,
+ &[1000, 3000, 7000, 5000, 6000],
+ 0, // sequence 0, 1, 2, 3, 4
+ );
+ partitions.write(&kvs).unwrap();
+
+ assert_eq!(1, partitions.num_partitions());
+ assert!(!partitions.is_empty());
+ let mut memtables = Vec::new();
+ partitions.list_memtables(&mut memtables);
+
+ let iter = memtables[0].iter(None, None).unwrap();
+ let timestamps = collect_iter_timestamps(iter);
+ assert_eq!(&[1000, 3000, 5000, 6000, 7000], &timestamps[..]);
+ }
+
+ #[test]
+ fn test_write_single_part() {
+ let metadata = memtable_util::metadata_for_test();
+ let builder = Arc::new(MergeTreeMemtableBuilder::default());
+ let partitions =
+ TimePartitions::new(metadata.clone(), builder, 0, Some(Duration::from_secs(10)));
+ assert_eq!(0, partitions.num_partitions());
+
+ let kvs = memtable_util::build_key_values(
+ &metadata,
+ "hello".to_string(),
+ 0,
+ &[5000, 2000, 0],
+ 0, // sequence 0, 1, 2
+ );
+ // It should create a new partition.
+ partitions.write(&kvs).unwrap();
+ assert_eq!(1, partitions.num_partitions());
+ assert!(!partitions.is_empty());
+
+ let kvs = memtable_util::build_key_values(
+ &metadata,
+ "hello".to_string(),
+ 0,
+ &[3000, 7000, 4000],
+ 3, // sequence 3, 4, 5
+ );
+ // Still writes to the same partition.
+ partitions.write(&kvs).unwrap();
+ assert_eq!(1, partitions.num_partitions());
+
+ let mut memtables = Vec::new();
+ partitions.list_memtables(&mut memtables);
+ let iter = memtables[0].iter(None, None).unwrap();
+ let timestamps = collect_iter_timestamps(iter);
+ assert_eq!(&[0, 2000, 3000, 4000, 5000, 7000], &timestamps[..]);
+ let parts = partitions.list_partitions();
+ assert_eq!(
+ Timestamp::new_millisecond(0),
+ parts[0].time_range.unwrap().min_timestamp
+ );
+ assert_eq!(
+ Timestamp::new_millisecond(10000),
+ parts[0].time_range.unwrap().max_timestamp
+ );
+ }
+
+ #[test]
+ fn test_write_multi_parts() {
+ let metadata = memtable_util::metadata_for_test();
+ let builder = Arc::new(MergeTreeMemtableBuilder::default());
+ let partitions =
+ TimePartitions::new(metadata.clone(), builder, 0, Some(Duration::from_secs(5)));
+ assert_eq!(0, partitions.num_partitions());
+
+ let kvs = memtable_util::build_key_values(
+ &metadata,
+ "hello".to_string(),
+ 0,
+ &[2000, 0],
+ 0, // sequence 0, 1
+ );
+ // It should create a new partition.
+ partitions.write(&kvs).unwrap();
+ assert_eq!(1, partitions.num_partitions());
+ assert!(!partitions.is_empty());
+
+ let kvs = memtable_util::build_key_values(
+ &metadata,
+ "hello".to_string(),
+ 0,
+ &[3000, 7000, 4000, 5000],
+ 2, // sequence 2, 3, 4, 5
+ );
+ // Writes 2 rows to the old partition and 2 rows to a new partition.
+ partitions.write(&kvs).unwrap();
+ assert_eq!(2, partitions.num_partitions());
+
+ let parts = partitions.list_partitions();
+ let iter = parts[0].memtable.iter(None, None).unwrap();
+ let timestamps = collect_iter_timestamps(iter);
+ assert_eq!(
+ Timestamp::new_millisecond(0),
+ parts[0].time_range.unwrap().min_timestamp
+ );
+ assert_eq!(
+ Timestamp::new_millisecond(5000),
+ parts[0].time_range.unwrap().max_timestamp
+ );
+ assert_eq!(&[0, 2000, 3000, 4000], &timestamps[..]);
+ let iter = parts[1].memtable.iter(None, None).unwrap();
+ let timestamps = collect_iter_timestamps(iter);
+ assert_eq!(&[5000, 7000], &timestamps[..]);
+ assert_eq!(
+ Timestamp::new_millisecond(5000),
+ parts[1].time_range.unwrap().min_timestamp
+ );
+ assert_eq!(
+ Timestamp::new_millisecond(10000),
+ parts[1].time_range.unwrap().max_timestamp
+ );
+ }
+}
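A standalone sketch of the bucket alignment idea used by partition_start_timestamp above, written with plain i64 milliseconds instead of the crate's Timestamp and BucketAligned types; the behavior for negative timestamps is an assumption about what align_by_bucket does.

use std::time::Duration;

/// Aligns a timestamp down to the start of its time-window bucket.
fn partition_start_millis(ts_millis: i64, bucket: Duration) -> i64 {
    let bucket_millis = bucket.as_millis() as i64;
    // Euclidean division keeps negative timestamps on the lower bucket edge.
    ts_millis.div_euclid(bucket_millis) * bucket_millis
}

fn main() {
    let bucket = Duration::from_secs(5);
    assert_eq!(partition_start_millis(7_000, bucket), 5_000);
    assert_eq!(partition_start_millis(4_000, bucket), 0);
    assert_eq!(partition_start_millis(-1, bucket), -5_000);
}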
diff --git a/src/mito2/src/memtable/time_series.rs b/src/mito2/src/memtable/time_series.rs
index 38ad4f328a34..c31f9bea7bcf 100644
--- a/src/mito2/src/memtable/time_series.rs
+++ b/src/mito2/src/memtable/time_series.rs
@@ -38,6 +38,7 @@ use table::predicate::Predicate;
use crate::error::{ComputeArrowSnafu, ConvertVectorSnafu, PrimaryKeyLengthMismatchSnafu, Result};
use crate::flush::WriteBufferManagerRef;
+use crate::memtable::key_values::KeyValue;
use crate::memtable::{
AllocTracker, BoxedBatchIterator, KeyValues, Memtable, MemtableBuilder, MemtableId,
MemtableRef, MemtableStats,
@@ -110,49 +111,75 @@ impl TimeSeriesMemtable {
}
/// Updates memtable stats.
- fn update_stats(&self, request_size: usize, min: i64, max: i64) {
- self.alloc_tracker.on_allocation(request_size);
+ fn update_stats(&self, stats: LocalStats) {
+ self.alloc_tracker.on_allocation(stats.allocated);
loop {
let current_min = self.min_timestamp.load(Ordering::Relaxed);
- if min >= current_min {
+ if stats.min_ts >= current_min {
break;
}
let Err(updated) = self.min_timestamp.compare_exchange(
current_min,
- min,
+ stats.min_ts,
Ordering::Relaxed,
Ordering::Relaxed,
) else {
break;
};
- if updated == min {
+ if updated == stats.min_ts {
break;
}
}
loop {
let current_max = self.max_timestamp.load(Ordering::Relaxed);
- if max <= current_max {
+ if stats.max_ts <= current_max {
break;
}
let Err(updated) = self.max_timestamp.compare_exchange(
current_max,
- max,
+ stats.max_ts,
Ordering::Relaxed,
Ordering::Relaxed,
) else {
break;
};
- if updated == max {
+ if updated == stats.max_ts {
break;
}
}
}
+
+ fn write_key_value(&self, kv: KeyValue, stats: &mut LocalStats) -> Result<()> {
+ ensure!(
+ kv.num_primary_keys() == self.row_codec.num_fields(),
+ PrimaryKeyLengthMismatchSnafu {
+ expect: self.row_codec.num_fields(),
+ actual: kv.num_primary_keys()
+ }
+ );
+ let primary_key_encoded = self.row_codec.encode(kv.primary_keys())?;
+ let fields = kv.fields().collect::<Vec<_>>();
+
+ stats.allocated += fields.iter().map(|v| v.data_size()).sum::<usize>();
+ let (series, series_allocated) = self.series_set.get_or_add_series(primary_key_encoded);
+ stats.allocated += series_allocated;
+
+ // safety: timestamp of kv must be both present and a valid timestamp value.
+ let ts = kv.timestamp().as_timestamp().unwrap().unwrap().value();
+ stats.min_ts = stats.min_ts.min(ts);
+ stats.max_ts = stats.max_ts.max(ts);
+
+ let mut guard = series.write().unwrap();
+ guard.push(kv.timestamp(), kv.sequence(), kv.op_type(), fields);
+
+ Ok(())
+ }
}
impl Debug for TimeSeriesMemtable {
@@ -167,43 +194,30 @@ impl Memtable for TimeSeriesMemtable {
}
fn write(&self, kvs: &KeyValues) -> Result<()> {
- let mut allocated = 0;
- let mut min_ts = i64::MAX;
- let mut max_ts = i64::MIN;
+ let mut local_stats = LocalStats::default();
for kv in kvs.iter() {
- ensure!(
- kv.num_primary_keys() == self.row_codec.num_fields(),
- PrimaryKeyLengthMismatchSnafu {
- expect: self.row_codec.num_fields(),
- actual: kv.num_primary_keys()
- }
- );
- let primary_key_encoded = self.row_codec.encode(kv.primary_keys())?;
- let fields = kv.fields().collect::<Vec<_>>();
-
- allocated += fields.iter().map(|v| v.data_size()).sum::<usize>();
- let (series, series_allocated) = self.series_set.get_or_add_series(primary_key_encoded);
- allocated += series_allocated;
-
- // safety: timestamp of kv must be both present and a valid timestamp value.
- let ts = kv.timestamp().as_timestamp().unwrap().unwrap().value();
- min_ts = min_ts.min(ts);
- max_ts = max_ts.max(ts);
-
- let mut guard = series.write().unwrap();
- guard.push(kv.timestamp(), kv.sequence(), kv.op_type(), fields);
+ self.write_key_value(kv, &mut local_stats)?;
}
- allocated += kvs.num_rows() * std::mem::size_of::<Timestamp>();
- allocated += kvs.num_rows() * std::mem::size_of::<OpType>();
+ local_stats.allocated += kvs.num_rows() * std::mem::size_of::<Timestamp>();
+ local_stats.allocated += kvs.num_rows() * std::mem::size_of::<OpType>();
// TODO(hl): this may be inaccurate since for-iteration may return early.
// We may lift the primary key length check out of Memtable::write
// so that we can ensure writing to memtable will succeed.
- self.update_stats(allocated, min_ts, max_ts);
+ self.update_stats(local_stats);
Ok(())
}
+ fn write_one(&self, key_value: KeyValue) -> Result<()> {
+ let mut local_stats = LocalStats::default();
+ let res = self.write_key_value(key_value, &mut local_stats);
+ local_stats.allocated += std::mem::size_of::<Timestamp>() + std::mem::size_of::<OpType>();
+
+ self.update_stats(local_stats);
+ res
+ }
+
fn iter(
&self,
projection: Option<&[ColumnId]>,
@@ -267,6 +281,22 @@ impl Memtable for TimeSeriesMemtable {
}
}
+struct LocalStats {
+ allocated: usize,
+ min_ts: i64,
+ max_ts: i64,
+}
+
+impl Default for LocalStats {
+ fn default() -> Self {
+ LocalStats {
+ allocated: 0,
+ min_ts: i64::MAX,
+ max_ts: i64::MIN,
+ }
+ }
+}
+
type SeriesRwLockMap = RwLock<BTreeMap<Vec<u8>, Arc<RwLock<Series>>>>;
struct SeriesSet {
diff --git a/src/mito2/src/memtable/version.rs b/src/mito2/src/memtable/version.rs
index c12437052144..9e18edc67345 100644
--- a/src/mito2/src/memtable/version.rs
+++ b/src/mito2/src/memtable/version.rs
@@ -20,26 +20,29 @@ use smallvec::SmallVec;
use store_api::metadata::RegionMetadataRef;
use crate::error::Result;
+use crate::memtable::time_partition::TimePartitionsRef;
use crate::memtable::{MemtableId, MemtableRef};
+pub(crate) type SmallMemtableVec = SmallVec<[MemtableRef; 2]>;
+
/// A version of current memtables in a region.
#[derive(Debug, Clone)]
pub(crate) struct MemtableVersion {
/// Mutable memtable.
- pub(crate) mutable: MemtableRef,
+ pub(crate) mutable: TimePartitionsRef,
/// Immutable memtables.
///
/// We only allow one flush job per region but if a flush job failed, then we
/// might need to store more than one immutable memtable on the next time we
/// flush the region.
- immutables: SmallVec<[MemtableRef; 2]>,
+ immutables: SmallMemtableVec,
}
pub(crate) type MemtableVersionRef = Arc<MemtableVersion>;
impl MemtableVersion {
/// Returns a new [MemtableVersion] with specific mutable memtable.
- pub(crate) fn new(mutable: MemtableRef) -> MemtableVersion {
+ pub(crate) fn new(mutable: TimePartitionsRef) -> MemtableVersion {
MemtableVersion {
mutable,
immutables: SmallVec::new(),
@@ -53,8 +56,8 @@ impl MemtableVersion {
/// Lists mutable and immutable memtables.
pub(crate) fn list_memtables(&self) -> Vec<MemtableRef> {
- let mut mems = Vec::with_capacity(self.immutables.len() + 1);
- mems.push(self.mutable.clone());
+ let mut mems = Vec::with_capacity(self.immutables.len() + self.mutable.num_partitions());
+ self.mutable.list_memtables(&mut mems);
mems.extend_from_slice(&self.immutables);
mems
}
@@ -76,15 +79,13 @@ impl MemtableVersion {
// soft limit.
self.mutable.freeze()?;
// Fork the memtable.
- let mutable = self.mutable.fork(self.next_memtable_id(), metadata);
+ let mutable = Arc::new(self.mutable.fork(metadata));
// Pushes the mutable memtable to immutable list.
- let immutables = self
- .immutables
- .iter()
- .cloned()
- .chain([self.mutable.clone()])
- .collect();
+ let mut immutables =
+ SmallVec::with_capacity(self.immutables.len() + self.mutable.num_partitions());
+ self.mutable.list_memtables_to_small_vec(&mut immutables);
+ immutables.extend(self.immutables.iter().cloned());
Ok(Some(MemtableVersion {
mutable,
immutables,
@@ -103,7 +104,7 @@ impl MemtableVersion {
/// Returns the memory usage of the mutable memtable.
pub(crate) fn mutable_usage(&self) -> usize {
- self.mutable.stats().estimated_bytes
+ self.mutable.memory_usage()
}
/// Returns the memory usage of the immutable memtables.
@@ -121,9 +122,4 @@ impl MemtableVersion {
pub(crate) fn is_empty(&self) -> bool {
self.mutable.is_empty() && self.immutables.is_empty()
}
-
- /// Returns the next memtable id.
- pub(crate) fn next_memtable_id(&self) -> MemtableId {
- self.mutable.id() + 1
- }
}
diff --git a/src/mito2/src/region/opener.rs b/src/mito2/src/region/opener.rs
index 75d0b9dcfb3f..5192c55469ff 100644
--- a/src/mito2/src/region/opener.rs
+++ b/src/mito2/src/region/opener.rs
@@ -37,6 +37,7 @@ use crate::error::{
};
use crate::manifest::manager::{RegionManifestManager, RegionManifestOptions};
use crate::manifest::storage::manifest_compress_type;
+use crate::memtable::time_partition::TimePartitions;
use crate::memtable::MemtableBuilderRef;
use crate::region::options::RegionOptions;
use crate::region::version::{VersionBuilder, VersionControl, VersionControlRef};
@@ -169,7 +170,13 @@ impl RegionOpener {
RegionManifestManager::new(metadata.clone(), region_manifest_options).await?;
// Initial memtable id is 0.
- let mutable = self.memtable_builder.build(0, &metadata);
+ let part_duration = options.compaction.time_window();
+ let mutable = Arc::new(TimePartitions::new(
+ metadata.clone(),
+ self.memtable_builder,
+ 0,
+ part_duration,
+ ));
debug!("Create region {} with options: {:?}", region_id, options);
@@ -265,7 +272,13 @@ impl RegionOpener {
self.cache_manager.clone(),
));
// Initial memtable id is 0.
- let mutable = self.memtable_builder.build(0, &metadata);
+ let part_duration = region_options.compaction.time_window();
+ let mutable = Arc::new(TimePartitions::new(
+ metadata.clone(),
+ self.memtable_builder.clone(),
+ 0,
+ part_duration,
+ ));
let version = VersionBuilder::new(metadata, mutable)
.add_files(file_purger.clone(), manifest.files.values().cloned())
.flushed_entry_id(manifest.flushed_entry_id)
diff --git a/src/mito2/src/region/options.rs b/src/mito2/src/region/options.rs
index e69c7193ff50..1667b5757303 100644
--- a/src/mito2/src/region/options.rs
+++ b/src/mito2/src/region/options.rs
@@ -94,6 +94,14 @@ pub enum CompactionOptions {
Twcs(TwcsOptions),
}
+impl CompactionOptions {
+ pub(crate) fn time_window(&self) -> Option<Duration> {
+ match self {
+ CompactionOptions::Twcs(opts) => opts.time_window,
+ }
+ }
+}
+
impl Default for CompactionOptions {
fn default() -> Self {
Self::Twcs(TwcsOptions::default())
diff --git a/src/mito2/src/region/version.rs b/src/mito2/src/region/version.rs
index 261371640bc5..fa95255c1a5c 100644
--- a/src/mito2/src/region/version.rs
+++ b/src/mito2/src/region/version.rs
@@ -31,8 +31,9 @@ use store_api::storage::SequenceNumber;
use crate::error::Result;
use crate::manifest::action::RegionEdit;
+use crate::memtable::time_partition::{TimePartitions, TimePartitionsRef};
use crate::memtable::version::{MemtableVersion, MemtableVersionRef};
-use crate::memtable::{MemtableBuilderRef, MemtableId, MemtableRef};
+use crate::memtable::{MemtableBuilderRef, MemtableId};
use crate::region::options::RegionOptions;
use crate::sst::file::FileMeta;
use crate::sst::file_purger::FilePurgerRef;
@@ -122,8 +123,14 @@ impl VersionControl {
/// Mark all opened files as deleted and set the delete marker in [VersionControlData]
pub(crate) fn mark_dropped(&self, memtable_builder: &MemtableBuilderRef) {
let version = self.current().version;
- let new_mutable =
- memtable_builder.build(version.memtables.next_memtable_id(), &version.metadata);
+ let part_duration = version.memtables.mutable.part_duration();
+ let next_memtable_id = version.memtables.mutable.next_memtable_id();
+ let new_mutable = Arc::new(TimePartitions::new(
+ version.metadata.clone(),
+ memtable_builder.clone(),
+ next_memtable_id,
+ part_duration,
+ ));
let mut data = self.data.write().unwrap();
data.is_dropped = true;
@@ -140,7 +147,14 @@ impl VersionControl {
/// new schema. Memtables of the version must be empty.
pub(crate) fn alter_schema(&self, metadata: RegionMetadataRef, builder: &MemtableBuilderRef) {
let version = self.current().version;
- let new_mutable = builder.build(version.memtables.next_memtable_id(), &metadata);
+ let part_duration = version.memtables.mutable.part_duration();
+ let next_memtable_id = version.memtables.mutable.next_memtable_id();
+ let new_mutable = Arc::new(TimePartitions::new(
+ metadata.clone(),
+ builder.clone(),
+ next_memtable_id,
+ part_duration,
+ ));
debug_assert!(version.memtables.mutable.is_empty());
debug_assert!(version.memtables.immutables().is_empty());
let new_version = Arc::new(
@@ -163,8 +177,14 @@ impl VersionControl {
) {
let version = self.current().version;
- let new_mutable =
- memtable_builder.build(version.memtables.next_memtable_id(), &version.metadata);
+ let part_duration = version.memtables.mutable.part_duration();
+ let next_memtable_id = version.memtables.mutable.next_memtable_id();
+ let new_mutable = Arc::new(TimePartitions::new(
+ version.metadata.clone(),
+ memtable_builder.clone(),
+ next_memtable_id,
+ part_duration,
+ ));
let new_version = Arc::new(
VersionBuilder::new(version.metadata.clone(), new_mutable)
.flushed_entry_id(truncated_entry_id)
@@ -242,7 +262,7 @@ pub(crate) struct VersionBuilder {
impl VersionBuilder {
/// Returns a new builder.
- pub(crate) fn new(metadata: RegionMetadataRef, mutable: MemtableRef) -> Self {
+ pub(crate) fn new(metadata: RegionMetadataRef, mutable: TimePartitionsRef) -> Self {
VersionBuilder {
metadata,
memtables: Arc::new(MemtableVersion::new(mutable)),
diff --git a/src/mito2/src/test_util/memtable_util.rs b/src/mito2/src/test_util/memtable_util.rs
index 38108dff3c12..3fe378b099a0 100644
--- a/src/mito2/src/test_util/memtable_util.rs
+++ b/src/mito2/src/test_util/memtable_util.rs
@@ -21,7 +21,9 @@ use api::v1::value::ValueData;
use api::v1::{Row, Rows, SemanticType};
use datatypes::arrow::array::UInt64Array;
use datatypes::data_type::ConcreteDataType;
+use datatypes::scalars::ScalarVector;
use datatypes::schema::ColumnSchema;
+use datatypes::vectors::TimestampMillisecondVector;
use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder, RegionMetadataRef};
use store_api::storage::{ColumnId, RegionId, SequenceNumber};
use table::predicate::Predicate;
@@ -58,6 +60,10 @@ impl Memtable for EmptyMemtable {
Ok(())
}
+ fn write_one(&self, _key_value: KeyValue) -> Result<()> {
+ Ok(())
+ }
+
fn iter(
&self,
_projection: Option<&[ColumnId]>,
@@ -303,3 +309,20 @@ pub(crate) fn encode_key_by_kv(key_value: &KeyValue) -> Vec<u8> {
]);
row_codec.encode(key_value.primary_keys()).unwrap()
}
+
+/// Collects timestamps from the batch iter.
+pub(crate) fn collect_iter_timestamps(iter: BoxedBatchIterator) -> Vec<i64> {
+ iter.flat_map(|batch| {
+ batch
+ .unwrap()
+ .timestamps()
+ .as_any()
+ .downcast_ref::<TimestampMillisecondVector>()
+ .unwrap()
+ .iter_data()
+ .collect::<Vec<_>>()
+ .into_iter()
+ })
+ .map(|v| v.unwrap().0.value())
+ .collect()
+}
diff --git a/src/mito2/src/test_util/version_util.rs b/src/mito2/src/test_util/version_util.rs
index da06f2b21668..c6e2f45e0b71 100644
--- a/src/mito2/src/test_util/version_util.rs
+++ b/src/mito2/src/test_util/version_util.rs
@@ -25,7 +25,7 @@ use store_api::metadata::{ColumnMetadata, RegionMetadata, RegionMetadataBuilder}
use store_api::storage::RegionId;
use crate::manifest::action::RegionEdit;
-use crate::memtable::MemtableBuilder;
+use crate::memtable::time_partition::TimePartitions;
use crate::region::version::{Version, VersionBuilder, VersionControl};
use crate::sst::file::{FileId, FileMeta};
use crate::sst::file_purger::FilePurgerRef;
@@ -101,7 +101,12 @@ impl VersionControlBuilder {
pub(crate) fn build_version(&self) -> Version {
let metadata = Arc::new(self.metadata.clone());
- let mutable = self.memtable_builder.build(0, &metadata);
+ let mutable = Arc::new(TimePartitions::new(
+ metadata.clone(),
+ self.memtable_builder.clone(),
+ 0,
+ None,
+ ));
VersionBuilder::new(metadata, mutable)
.add_files(self.file_purger.clone(), self.files.values().cloned())
.build()
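The min/max timestamp bookkeeping in update_stats of time_series.rs above relies on a lock-free retry loop. A self-contained sketch of the same pattern for the max side (names simplified, not the crate's code):

use std::sync::atomic::{AtomicI64, Ordering};

/// Raises `max_ts` to `candidate` unless another writer already stored a larger value.
fn update_max(max_ts: &AtomicI64, candidate: i64) {
    let mut current = max_ts.load(Ordering::Relaxed);
    while candidate > current {
        match max_ts.compare_exchange(current, candidate, Ordering::Relaxed, Ordering::Relaxed) {
            Ok(_) => break,
            // Another thread won the race; retry against the value it stored.
            Err(actual) => current = actual,
        }
    }
}

fn main() {
    let max_ts = AtomicI64::new(i64::MIN);
    update_max(&max_ts, 42);
    update_max(&max_ts, 7);
    assert_eq!(max_ts.load(Ordering::Relaxed), 42);
}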
|
feat
|
Partition memtables by time if compaction window is provided (#3501)
|
9f865b50abe3556800d8aba4acb5fdc002c565fa
|
2022-11-22 14:17:45
|
Ruihang Xia
|
test: add dummy select case (#618)
| false
|
diff --git a/tests/cases/standalone/select/dummy.output b/tests/cases/standalone/select/dummy.output
new file mode 100644
index 000000000000..e0ac9dc3f035
--- /dev/null
+++ b/tests/cases/standalone/select/dummy.output
@@ -0,0 +1,36 @@
+select 1;
+
++--------------------------+
+| Int64(1), #Field, #Int64 |
++--------------------------+
+| 1 |
++--------------------------+
+
+select 2 + 3;
+
++----------------------------------------+
+| Int64(2) Plus Int64(3), #Field, #Int64 |
++----------------------------------------+
+| 5 |
++----------------------------------------+
+
+select 4 + 0.5;
+
++----------------------------------------------+
+| Int64(4) Plus Float64(0.5), #Field, #Float64 |
++----------------------------------------------+
+| 4.5 |
++----------------------------------------------+
+
+select "a";
+
+Failed to execute, error: Datanode { code: 3000, msg: "Failed to execute sql, source: Cannot plan SQL: SELECT \"a\", source: Error during planning: Invalid identifier '#a' for schema fields:[], metadata:{}" }
+
+select "A";
+
+Failed to execute, error: Datanode { code: 3000, msg: "Failed to execute sql, source: Cannot plan SQL: SELECT \"A\", source: Error during planning: Invalid identifier '#A' for schema fields:[], metadata:{}" }
+
+select * where "a" = "A";
+
+Failed to execute, error: Datanode { code: 3000, msg: "Failed to execute sql, source: Cannot plan SQL: SELECT * WHERE \"a\" = \"A\", source: Error during planning: Invalid identifier '#a' for schema fields:[], metadata:{}" }
+
diff --git a/tests/cases/standalone/select/dummy.sql b/tests/cases/standalone/select/dummy.sql
new file mode 100644
index 000000000000..97d975b2e2c7
--- /dev/null
+++ b/tests/cases/standalone/select/dummy.sql
@@ -0,0 +1,11 @@
+select 1;
+
+select 2 + 3;
+
+select 4 + 0.5;
+
+select "a";
+
+select "A";
+
+select * where "a" = "A";
diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index 94fdc8408886..c8b571e7fb2e 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -35,8 +35,8 @@ impl Environment for Env {
async fn start(&self, mode: &str, _config: Option<String>) -> Self::DB {
match mode {
- "local" => Self::start_local().await,
- "remote" => Self::start_remote().await,
+ "standalone" => Self::start_standalone().await,
+ "distributed" => Self::start_distributed().await,
_ => panic!("Unexpected mode: {}", mode),
}
}
@@ -48,7 +48,7 @@ impl Environment for Env {
}
impl Env {
- pub async fn start_local() -> GreptimeDB {
+ pub async fn start_standalone() -> GreptimeDB {
let server_process = Command::new("cargo")
.current_dir("../")
.args(["run", "--", "standalone", "start"])
@@ -67,7 +67,7 @@ impl Env {
}
}
- pub async fn start_remote() -> GreptimeDB {
+ pub async fn start_distributed() -> GreptimeDB {
todo!()
}
}
|
test
|
add dummy select case (#618)
|
1711ad46312e4588d6ebd40619e1900ca1420822
|
2024-01-24 11:40:05
|
Ning Sun
|
feat: add Arrow IPC output format for http rest api (#3177)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 42bbadab06c6..fba8c8582a6a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8495,7 +8495,10 @@ version = "0.6.0"
dependencies = [
"aide",
"api",
+ "arrow",
"arrow-flight",
+ "arrow-ipc",
+ "arrow-schema",
"async-trait",
"auth",
"axum",
diff --git a/Cargo.toml b/Cargo.toml
index 617c66bb6527..a63e0bedcca2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -70,6 +70,7 @@ aquamarine = "0.3"
arrow = { version = "47.0" }
arrow-array = "47.0"
arrow-flight = "47.0"
+arrow-ipc = "47.0"
arrow-schema = { version = "47.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index b7da8935f1ab..295b1b28113d 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -14,6 +14,9 @@ testing = []
aide = { version = "0.9", features = ["axum"] }
api.workspace = true
arrow-flight.workspace = true
+arrow-ipc.workspace = true
+arrow-schema.workspace = true
+arrow.workspace = true
async-trait = "0.1"
auth.workspace = true
axum-macros = "0.3.8"
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index a19d39fe3c06..905e4fe26fcd 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -35,6 +35,12 @@ use tonic::Code;
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
+ #[snafu(display("Arrow error"))]
+ Arrow {
+ #[snafu(source)]
+ error: arrow_schema::ArrowError,
+ },
+
#[snafu(display("Internal error: {}", err_msg))]
Internal { err_msg: String },
@@ -455,7 +461,8 @@ impl ErrorExt for Error {
| TcpIncoming { .. }
| CatalogError { .. }
| GrpcReflectionService { .. }
- | BuildHttpResponse { .. } => StatusCode::Internal,
+ | BuildHttpResponse { .. }
+ | Arrow { .. } => StatusCode::Internal,
UnsupportedDataType { .. } => StatusCode::Unsupported,
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index d84b991fa0e0..d9371cab992d 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -50,6 +50,7 @@ use tower_http::trace::TraceLayer;
use self::authorize::AuthState;
use crate::configurator::ConfiguratorRef;
use crate::error::{AlreadyStartedSnafu, Error, Result, StartHttpSnafu, ToJsonSnafu};
+use crate::http::arrow_result::ArrowResponse;
use crate::http::csv_result::CsvResponse;
use crate::http::error_result::ErrorResponse;
use crate::http::greptime_result_v1::GreptimedbV1Response;
@@ -82,6 +83,7 @@ pub mod prom_store;
pub mod prometheus;
pub mod script;
+pub mod arrow_result;
pub mod csv_result;
#[cfg(feature = "dashboard")]
mod dashboard;
@@ -247,6 +249,7 @@ pub enum GreptimeQueryOutput {
/// It allows the results of SQL queries to be presented in different formats.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
pub enum ResponseFormat {
+ Arrow,
Csv,
#[default]
GreptimedbV1,
@@ -256,6 +259,7 @@ pub enum ResponseFormat {
impl ResponseFormat {
pub fn parse(s: &str) -> Option<Self> {
match s {
+ "arrow" => Some(ResponseFormat::Arrow),
"csv" => Some(ResponseFormat::Csv),
"greptimedb_v1" => Some(ResponseFormat::GreptimedbV1),
"influxdb_v1" => Some(ResponseFormat::InfluxdbV1),
@@ -265,6 +269,7 @@ impl ResponseFormat {
pub fn as_str(&self) -> &'static str {
match self {
+ ResponseFormat::Arrow => "arrow",
ResponseFormat::Csv => "csv",
ResponseFormat::GreptimedbV1 => "greptimedb_v1",
ResponseFormat::InfluxdbV1 => "influxdb_v1",
@@ -318,6 +323,7 @@ impl Display for Epoch {
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
pub enum HttpResponse {
+ Arrow(ArrowResponse),
Csv(CsvResponse),
Error(ErrorResponse),
GreptimedbV1(GreptimedbV1Response),
@@ -327,6 +333,7 @@ pub enum HttpResponse {
impl HttpResponse {
pub fn with_execution_time(self, execution_time: u64) -> Self {
match self {
+ HttpResponse::Arrow(resp) => resp.with_execution_time(execution_time).into(),
HttpResponse::Csv(resp) => resp.with_execution_time(execution_time).into(),
HttpResponse::GreptimedbV1(resp) => resp.with_execution_time(execution_time).into(),
HttpResponse::InfluxdbV1(resp) => resp.with_execution_time(execution_time).into(),
@@ -338,6 +345,7 @@ impl HttpResponse {
impl IntoResponse for HttpResponse {
fn into_response(self) -> Response {
match self {
+ HttpResponse::Arrow(resp) => resp.into_response(),
HttpResponse::Csv(resp) => resp.into_response(),
HttpResponse::GreptimedbV1(resp) => resp.into_response(),
HttpResponse::InfluxdbV1(resp) => resp.into_response(),
@@ -350,6 +358,12 @@ impl OperationOutput for HttpResponse {
type Inner = Response;
}
+impl From<ArrowResponse> for HttpResponse {
+ fn from(value: ArrowResponse) -> Self {
+ HttpResponse::Arrow(value)
+ }
+}
+
impl From<CsvResponse> for HttpResponse {
fn from(value: CsvResponse) -> Self {
HttpResponse::Csv(value)
@@ -801,9 +815,12 @@ async fn handle_error(err: BoxError) -> Json<HttpResponse> {
#[cfg(test)]
mod test {
use std::future::pending;
+ use std::io::Cursor;
use std::sync::Arc;
use api::v1::greptime_request::Request;
+ use arrow_ipc::reader::FileReader;
+ use arrow_schema::DataType;
use axum::handler::Handler;
use axum::http::StatusCode;
use axum::routing::get;
@@ -942,11 +959,13 @@ mod test {
ResponseFormat::GreptimedbV1,
ResponseFormat::InfluxdbV1,
ResponseFormat::Csv,
+ ResponseFormat::Arrow,
] {
let recordbatches =
RecordBatches::try_new(schema.clone(), vec![recordbatch.clone()]).unwrap();
let outputs = vec![Ok(Output::RecordBatches(recordbatches))];
let json_resp = match format {
+ ResponseFormat::Arrow => ArrowResponse::from_output(outputs).await,
ResponseFormat::Csv => CsvResponse::from_output(outputs).await,
ResponseFormat::GreptimedbV1 => GreptimedbV1Response::from_output(outputs).await,
ResponseFormat::InfluxdbV1 => InfluxdbV1Response::from_output(outputs, None).await,
@@ -992,6 +1011,20 @@ mod test {
panic!("invalid output type");
}
}
+ HttpResponse::Arrow(resp) => {
+ let output = resp.data;
+ let mut reader =
+ FileReader::try_new(Cursor::new(output), None).expect("Arrow reader error");
+ let schema = reader.schema();
+ assert_eq!(schema.fields[0].name(), "numbers");
+ assert_eq!(schema.fields[0].data_type(), &DataType::UInt32);
+ assert_eq!(schema.fields[1].name(), "strings");
+ assert_eq!(schema.fields[1].data_type(), &DataType::Utf8);
+
+ let rb = reader.next().unwrap().expect("read record batch failed");
+ assert_eq!(rb.num_columns(), 2);
+ assert_eq!(rb.num_rows(), 4);
+ }
HttpResponse::Error(err) => unreachable!("{err:?}"),
}
}
diff --git a/src/servers/src/http/arrow_result.rs b/src/servers/src/http/arrow_result.rs
new file mode 100644
index 000000000000..78d22b20c5d7
--- /dev/null
+++ b/src/servers/src/http/arrow_result.rs
@@ -0,0 +1,141 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::pin::Pin;
+use std::sync::Arc;
+
+use arrow::datatypes::Schema;
+use arrow_ipc::writer::FileWriter;
+use axum::http::{header, HeaderName, HeaderValue};
+use axum::response::{IntoResponse, Response};
+use common_error::status_code::StatusCode;
+use common_query::Output;
+use common_recordbatch::RecordBatchStream;
+use futures::StreamExt;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+use snafu::ResultExt;
+
+use crate::error::{self, Error};
+use crate::http::error_result::ErrorResponse;
+use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT};
+use crate::http::{HttpResponse, ResponseFormat};
+
+#[derive(Serialize, Deserialize, Debug, JsonSchema)]
+pub struct ArrowResponse {
+ pub(crate) data: Vec<u8>,
+ pub(crate) execution_time_ms: u64,
+}
+
+async fn write_arrow_bytes(
+ mut recordbatches: Pin<Box<dyn RecordBatchStream + Send>>,
+ schema: &Arc<Schema>,
+) -> Result<Vec<u8>, Error> {
+ let mut bytes = Vec::new();
+ {
+ let mut writer = FileWriter::try_new(&mut bytes, schema).context(error::ArrowSnafu)?;
+
+ while let Some(rb) = recordbatches.next().await {
+ let rb = rb.context(error::CollectRecordbatchSnafu)?;
+ writer
+ .write(&rb.into_df_record_batch())
+ .context(error::ArrowSnafu)?;
+ }
+
+ writer.finish().context(error::ArrowSnafu)?;
+ }
+
+ Ok(bytes)
+}
+
+impl ArrowResponse {
+ pub async fn from_output(mut outputs: Vec<crate::error::Result<Output>>) -> HttpResponse {
+ if outputs.len() != 1 {
+ return HttpResponse::Error(ErrorResponse::from_error_message(
+ ResponseFormat::Arrow,
+ StatusCode::InvalidArguments,
+ "Multi-statements and empty query are not allowed".to_string(),
+ ));
+ }
+
+ match outputs.remove(0) {
+ Ok(output) => match output {
+ Output::AffectedRows(_rows) => HttpResponse::Arrow(ArrowResponse {
+ data: vec![],
+ execution_time_ms: 0,
+ }),
+ Output::RecordBatches(recordbatches) => {
+ let schema = recordbatches.schema();
+ match write_arrow_bytes(recordbatches.as_stream(), schema.arrow_schema()).await
+ {
+ Ok(payload) => HttpResponse::Arrow(ArrowResponse {
+ data: payload,
+ execution_time_ms: 0,
+ }),
+ Err(e) => {
+ HttpResponse::Error(ErrorResponse::from_error(ResponseFormat::Arrow, e))
+ }
+ }
+ }
+
+ Output::Stream(recordbatches) => {
+ let schema = recordbatches.schema();
+ match write_arrow_bytes(recordbatches, schema.arrow_schema()).await {
+ Ok(payload) => HttpResponse::Arrow(ArrowResponse {
+ data: payload,
+ execution_time_ms: 0,
+ }),
+ Err(e) => {
+ HttpResponse::Error(ErrorResponse::from_error(ResponseFormat::Arrow, e))
+ }
+ }
+ }
+ },
+ Err(e) => HttpResponse::Error(ErrorResponse::from_error(ResponseFormat::Arrow, e)),
+ }
+ }
+
+ pub fn with_execution_time(mut self, execution_time: u64) -> Self {
+ self.execution_time_ms = execution_time;
+ self
+ }
+
+ pub fn execution_time_ms(&self) -> u64 {
+ self.execution_time_ms
+ }
+}
+
+impl IntoResponse for ArrowResponse {
+ fn into_response(self) -> Response {
+ let execution_time = self.execution_time_ms;
+ (
+ [
+ (
+ header::CONTENT_TYPE,
+ HeaderValue::from_static("application/arrow"),
+ ),
+ (
+ HeaderName::from_static(GREPTIME_DB_HEADER_FORMAT),
+ HeaderValue::from_static("ARROW"),
+ ),
+ (
+ HeaderName::from_static(GREPTIME_DB_HEADER_EXECUTION_TIME),
+ HeaderValue::from(execution_time),
+ ),
+ ],
+ self.data,
+ )
+ .into_response()
+ }
+}
diff --git a/src/servers/src/http/handler.rs b/src/servers/src/http/handler.rs
index 3bcea4595d7b..88b32427557d 100644
--- a/src/servers/src/http/handler.rs
+++ b/src/servers/src/http/handler.rs
@@ -29,6 +29,7 @@ use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use session::context::QueryContextRef;
+use crate::http::arrow_result::ArrowResponse;
use crate::http::csv_result::CsvResponse;
use crate::http::error_result::ErrorResponse;
use crate::http::greptime_result_v1::GreptimedbV1Response;
@@ -111,6 +112,7 @@ pub async fn sql(
};
let resp = match format {
+ ResponseFormat::Arrow => ArrowResponse::from_output(outputs).await,
ResponseFormat::Csv => CsvResponse::from_output(outputs).await,
ResponseFormat::GreptimedbV1 => GreptimedbV1Response::from_output(outputs).await,
ResponseFormat::InfluxdbV1 => InfluxdbV1Response::from_output(outputs, epoch).await,
|
feat
|
add Arrow IPC output format for http rest api (#3177)
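Illustrative note (not part of the commit above): the unit test in this diff already shows the payload being read back with arrow-ipc's FileReader. As a hedged sketch of the client side, assuming the body of a SQL query issued with the arrow response format has been fetched into `payload`, decoding looks like this; the function name is made up, and the empty-body check mirrors the `Output::AffectedRows` branch that returns an empty data vector.

use std::io::Cursor;

use arrow_ipc::reader::FileReader;
use arrow_schema::ArrowError;

fn decode_ipc(payload: Vec<u8>) -> Result<usize, ArrowError> {
    // An empty body means the statement only reported affected rows,
    // so there is nothing to decode.
    if payload.is_empty() {
        return Ok(0);
    }
    let reader = FileReader::try_new(Cursor::new(payload), None)?;
    // The schema is available up front, before reading any batch.
    println!("schema: {:?}", reader.schema());

    let mut rows = 0;
    for batch in reader {
        rows += batch?.num_rows();
    }
    Ok(rows)
}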
|
6bba5e0afabb3b94dda9b900b3943940baa097a9
|
2025-02-17 12:39:15
|
Yingwen
|
feat: collect stager metrics (#5553)
| false
|
diff --git a/src/mito2/src/metrics.rs b/src/mito2/src/metrics.rs
index 65a8e1dc8578..fee9044ae907 100644
--- a/src/mito2/src/metrics.rs
+++ b/src/mito2/src/metrics.rs
@@ -12,8 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::time::Duration;
+
use lazy_static::lazy_static;
use prometheus::*;
+use puffin::puffin_manager::stager::StagerNotifier;
/// Stage label.
pub const STAGE_LABEL: &str = "stage";
@@ -28,6 +31,10 @@ pub const FILE_TYPE_LABEL: &str = "file_type";
pub const WORKER_LABEL: &str = "worker";
/// Partition label.
pub const PARTITION_LABEL: &str = "partition";
+/// Staging dir type label.
+pub const STAGING_TYPE: &str = "index_staging";
+/// Recycle bin type label.
+pub const RECYCLE_TYPE: &str = "recycle_bin";
lazy_static! {
/// Global write buffer size in bytes.
@@ -381,3 +388,68 @@ lazy_static! {
exponential_buckets(0.01, 10.0, 6).unwrap(),
).unwrap();
}
+
+/// Stager notifier to collect metrics.
+pub struct StagerMetrics {
+ cache_hit: IntCounter,
+ cache_miss: IntCounter,
+ staging_cache_bytes: IntGauge,
+ recycle_cache_bytes: IntGauge,
+ cache_eviction: IntCounter,
+ staging_miss_read: Histogram,
+}
+
+impl StagerMetrics {
+ /// Creates a new stager notifier.
+ pub fn new() -> Self {
+ Self {
+ cache_hit: CACHE_HIT.with_label_values(&[STAGING_TYPE]),
+ cache_miss: CACHE_MISS.with_label_values(&[STAGING_TYPE]),
+ staging_cache_bytes: CACHE_BYTES.with_label_values(&[STAGING_TYPE]),
+ recycle_cache_bytes: CACHE_BYTES.with_label_values(&[RECYCLE_TYPE]),
+ cache_eviction: CACHE_EVICTION.with_label_values(&[STAGING_TYPE, "size"]),
+ staging_miss_read: READ_STAGE_ELAPSED.with_label_values(&["staging_miss_read"]),
+ }
+ }
+}
+
+impl Default for StagerMetrics {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl StagerNotifier for StagerMetrics {
+ fn on_cache_hit(&self, _size: u64) {
+ self.cache_hit.inc();
+ }
+
+ fn on_cache_miss(&self, _size: u64) {
+ self.cache_miss.inc();
+ }
+
+ fn on_cache_insert(&self, size: u64) {
+ self.staging_cache_bytes.add(size as i64);
+ }
+
+ fn on_load_dir(&self, duration: Duration) {
+ self.staging_miss_read.observe(duration.as_secs_f64());
+ }
+
+ fn on_load_blob(&self, duration: Duration) {
+ self.staging_miss_read.observe(duration.as_secs_f64());
+ }
+
+ fn on_cache_evict(&self, size: u64) {
+ self.cache_eviction.inc();
+ self.staging_cache_bytes.sub(size as i64);
+ }
+
+ fn on_recycle_insert(&self, size: u64) {
+ self.recycle_cache_bytes.add(size as i64);
+ }
+
+ fn on_recycle_clear(&self, size: u64) {
+ self.recycle_cache_bytes.sub(size as i64);
+ }
+}
diff --git a/src/mito2/src/sst/index/puffin_manager.rs b/src/mito2/src/sst/index/puffin_manager.rs
index 4b8b6a3dcb0f..d8559d2e07d3 100644
--- a/src/mito2/src/sst/index/puffin_manager.rs
+++ b/src/mito2/src/sst/index/puffin_manager.rs
@@ -27,8 +27,8 @@ use snafu::ResultExt;
use crate::error::{PuffinInitStagerSnafu, Result};
use crate::metrics::{
- INDEX_PUFFIN_FLUSH_OP_TOTAL, INDEX_PUFFIN_READ_BYTES_TOTAL, INDEX_PUFFIN_READ_OP_TOTAL,
- INDEX_PUFFIN_WRITE_BYTES_TOTAL, INDEX_PUFFIN_WRITE_OP_TOTAL,
+ StagerMetrics, INDEX_PUFFIN_FLUSH_OP_TOTAL, INDEX_PUFFIN_READ_BYTES_TOTAL,
+ INDEX_PUFFIN_READ_OP_TOTAL, INDEX_PUFFIN_WRITE_BYTES_TOTAL, INDEX_PUFFIN_WRITE_OP_TOTAL,
};
use crate::sst::index::store::{self, InstrumentedStore};
@@ -63,9 +63,13 @@ impl PuffinManagerFactory {
write_buffer_size: Option<usize>,
) -> Result<Self> {
let staging_dir = aux_path.as_ref().join(STAGING_DIR);
- let stager = BoundedStager::new(staging_dir, staging_capacity, None)
- .await
- .context(PuffinInitStagerSnafu)?;
+ let stager = BoundedStager::new(
+ staging_dir,
+ staging_capacity,
+ Some(Arc::new(StagerMetrics::default())),
+ )
+ .await
+ .context(PuffinInitStagerSnafu)?;
Ok(Self {
stager: Arc::new(stager),
write_buffer_size,
|
feat
|
collect stager metrics (#5553)
|
c9c5e69adf6e0c2dd8c87fdba82a573a8a8ddc97
|
2022-04-25 14:33:05
|
evenyag
|
feat: Implement BinaryType and BinaryVector
| false
|
diff --git a/src/common/src/bytes.rs b/src/common/src/bytes.rs
new file mode 100644
index 000000000000..ae09dd0f2c22
--- /dev/null
+++ b/src/common/src/bytes.rs
@@ -0,0 +1,7 @@
+/// Bytes buffer.
+#[derive(Debug, Default, Clone)]
+pub struct Bytes(Vec<u8>);
+
+/// String buffer with arbitrary encoding.
+#[derive(Debug, Default, Clone)]
+pub struct StringBytes(Vec<u8>);
diff --git a/src/common/src/lib.rs b/src/common/src/lib.rs
index ae09dd0f2c22..ad0049c4c2fc 100644
--- a/src/common/src/lib.rs
+++ b/src/common/src/lib.rs
@@ -1,7 +1 @@
-/// Bytes buffer.
-#[derive(Debug, Default, Clone)]
-pub struct Bytes(Vec<u8>);
-
-/// String buffer with arbitrary encoding.
-#[derive(Debug, Default, Clone)]
-pub struct StringBytes(Vec<u8>);
+pub mod bytes;
diff --git a/src/datatypes/src/data_type.rs b/src/datatypes/src/data_type.rs
index 122a2c173a90..4cd9f6fd157e 100644
--- a/src/datatypes/src/data_type.rs
+++ b/src/datatypes/src/data_type.rs
@@ -4,7 +4,7 @@ use crate::type_id::LogicalTypeId;
use crate::value::Value;
/// Data type abstraction.
-pub trait DataType: std::fmt::Debug {
+pub trait DataType: std::fmt::Debug + Send + Sync {
/// Name of this data type.
fn name(&self) -> &str;
diff --git a/src/datatypes/src/lib.rs b/src/datatypes/src/lib.rs
index 3b22def878ac..5c7b8b66d924 100644
--- a/src/datatypes/src/lib.rs
+++ b/src/datatypes/src/lib.rs
@@ -1,6 +1,11 @@
mod data_type;
+pub mod prelude;
mod schema;
pub mod type_id;
mod types;
pub mod value;
pub mod vectors;
+
+use arrow2::array::BinaryArray;
+
+pub type LargeBinaryArray = BinaryArray<i64>;
diff --git a/src/datatypes/src/prelude.rs b/src/datatypes/src/prelude.rs
new file mode 100644
index 000000000000..1aa4775354be
--- /dev/null
+++ b/src/datatypes/src/prelude.rs
@@ -0,0 +1,3 @@
+pub use crate::data_type::{DataType, DataTypeRef};
+pub use crate::type_id::LogicalTypeId;
+pub use crate::value::Value;
diff --git a/src/datatypes/src/types.rs b/src/datatypes/src/types.rs
index fd30e143a98a..02fa6bc718f6 100644
--- a/src/datatypes/src/types.rs
+++ b/src/datatypes/src/types.rs
@@ -1,2 +1,3 @@
+pub mod binary_type;
pub mod primitive_traits;
pub mod primitive_type;
diff --git a/src/datatypes/src/types/binary_type.rs b/src/datatypes/src/types/binary_type.rs
new file mode 100644
index 000000000000..c5d5a3ed187f
--- /dev/null
+++ b/src/datatypes/src/types/binary_type.rs
@@ -0,0 +1,30 @@
+use std::sync::Arc;
+
+use common::bytes::StringBytes;
+
+use crate::data_type::{DataType, DataTypeRef};
+use crate::type_id::LogicalTypeId;
+use crate::value::Value;
+
+#[derive(Debug, Default)]
+pub struct BinaryType;
+
+impl BinaryType {
+ pub fn arc() -> DataTypeRef {
+ Arc::new(Self)
+ }
+}
+
+impl DataType for BinaryType {
+ fn name(&self) -> &str {
+ "Binary"
+ }
+
+ fn logical_type_id(&self) -> LogicalTypeId {
+ LogicalTypeId::String
+ }
+
+ fn default_value(&self) -> Value {
+ StringBytes::default().into()
+ }
+}
diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs
index 063ec321f594..3a7f597eaf75 100644
--- a/src/datatypes/src/value.rs
+++ b/src/datatypes/src/value.rs
@@ -1,4 +1,4 @@
-use common::{Bytes, StringBytes};
+use common::bytes::{Bytes, StringBytes};
/// Value holds a single arbitrary value of any [DataType](crate::data_type::DataType).
#[derive(Debug)]
@@ -57,3 +57,5 @@ impl_from!(Int32, i32);
impl_from!(Int64, i64);
impl_from!(Float32, f32);
impl_from!(Float64, f64);
+impl_from!(String, StringBytes);
+impl_from!(Binary, Bytes);
diff --git a/src/datatypes/src/vectors.rs b/src/datatypes/src/vectors.rs
index cbb95f5c331d..9adac163b6d5 100644
--- a/src/datatypes/src/vectors.rs
+++ b/src/datatypes/src/vectors.rs
@@ -1,3 +1,4 @@
+pub mod binary;
pub mod primitive;
use std::any::Any;
@@ -8,6 +9,8 @@ use crate::data_type::DataTypeRef;
/// Vector of data values.
pub trait Vector: Send + Sync {
/// Returns the data type of the vector.
+ ///
+ /// This may require heap allocation.
fn data_type(&self) -> DataTypeRef;
/// Returns the vector as [Any](std::any::Any) so that it can be
diff --git a/src/datatypes/src/vectors/binary.rs b/src/datatypes/src/vectors/binary.rs
new file mode 100644
index 000000000000..230a0ce611b5
--- /dev/null
+++ b/src/datatypes/src/vectors/binary.rs
@@ -0,0 +1,26 @@
+use std::any::Any;
+
+use crate::data_type::DataTypeRef;
+use crate::types::binary_type::BinaryType;
+use crate::vectors::Vector;
+use crate::LargeBinaryArray;
+
+/// Vector of binary strings.
+#[derive(Debug)]
+pub struct BinaryVector {
+ array: LargeBinaryArray,
+}
+
+impl Vector for BinaryVector {
+ fn data_type(&self) -> DataTypeRef {
+ BinaryType::arc()
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn len(&self) -> usize {
+ self.array.len()
+ }
+}
|
feat
|
Implement BinaryType and BinaryVector
|
0e2fd8e2bdadb7a4629e21a9601214b7adecf220
|
2025-03-04 10:40:12
|
Ruihang Xia
|
feat: rewrite `json_encode_path` to `geo_path` using compound type (#5640)
| false
|
diff --git a/src/common/function/src/aggr.rs b/src/common/function/src/aggr.rs
index be271d4d203f..24bcb86618ef 100644
--- a/src/common/function/src/aggr.rs
+++ b/src/common/function/src/aggr.rs
@@ -12,9 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+mod geo_path;
mod hll;
mod uddsketch_state;
+pub use geo_path::{GeoPathAccumulator, GEO_PATH_NAME};
pub(crate) use hll::HllStateType;
pub use hll::{HllState, HLL_MERGE_NAME, HLL_NAME};
pub use uddsketch_state::{UddSketchState, UDDSKETCH_STATE_NAME};
diff --git a/src/common/function/src/aggr/geo_path.rs b/src/common/function/src/aggr/geo_path.rs
new file mode 100644
index 000000000000..d5a2f71b57c7
--- /dev/null
+++ b/src/common/function/src/aggr/geo_path.rs
@@ -0,0 +1,433 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use datafusion::arrow::array::{Array, ArrayRef};
+use datafusion::common::cast::as_primitive_array;
+use datafusion::error::{DataFusionError, Result as DfResult};
+use datafusion::logical_expr::{Accumulator as DfAccumulator, AggregateUDF, Volatility};
+use datafusion::prelude::create_udaf;
+use datafusion_common::cast::{as_list_array, as_struct_array};
+use datafusion_common::utils::SingleRowListArrayBuilder;
+use datafusion_common::ScalarValue;
+use datatypes::arrow::array::{Float64Array, Int64Array, ListArray, StructArray};
+use datatypes::arrow::datatypes::{
+ DataType, Field, Float64Type, Int64Type, TimeUnit, TimestampNanosecondType,
+};
+use datatypes::compute::{self, sort_to_indices};
+
+pub const GEO_PATH_NAME: &str = "geo_path";
+
+const LATITUDE_FIELD: &str = "lat";
+const LONGITUDE_FIELD: &str = "lng";
+const TIMESTAMP_FIELD: &str = "timestamp";
+const DEFAULT_LIST_FIELD_NAME: &str = "item";
+
+#[derive(Debug, Default)]
+pub struct GeoPathAccumulator {
+ lat: Vec<Option<f64>>,
+ lng: Vec<Option<f64>>,
+ timestamp: Vec<Option<i64>>,
+}
+
+impl GeoPathAccumulator {
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ pub fn udf_impl() -> AggregateUDF {
+ create_udaf(
+ GEO_PATH_NAME,
+ // Input types: lat, lng, timestamp
+ vec![
+ DataType::Float64,
+ DataType::Float64,
+ DataType::Timestamp(TimeUnit::Nanosecond, None),
+ ],
+ // Output type: list of points {[lat], [lng]}
+ Arc::new(DataType::Struct(
+ vec![
+ Field::new(
+ LATITUDE_FIELD,
+ DataType::List(Arc::new(Field::new(
+ DEFAULT_LIST_FIELD_NAME,
+ DataType::Float64,
+ true,
+ ))),
+ false,
+ ),
+ Field::new(
+ LONGITUDE_FIELD,
+ DataType::List(Arc::new(Field::new(
+ DEFAULT_LIST_FIELD_NAME,
+ DataType::Float64,
+ true,
+ ))),
+ false,
+ ),
+ ]
+ .into(),
+ )),
+ Volatility::Immutable,
+ // Create the accumulator
+ Arc::new(|_| Ok(Box::new(GeoPathAccumulator::new()))),
+ // Intermediate state types
+ Arc::new(vec![DataType::Struct(
+ vec![
+ Field::new(
+ LATITUDE_FIELD,
+ DataType::List(Arc::new(Field::new(
+ DEFAULT_LIST_FIELD_NAME,
+ DataType::Float64,
+ true,
+ ))),
+ false,
+ ),
+ Field::new(
+ LONGITUDE_FIELD,
+ DataType::List(Arc::new(Field::new(
+ DEFAULT_LIST_FIELD_NAME,
+ DataType::Float64,
+ true,
+ ))),
+ false,
+ ),
+ Field::new(
+ TIMESTAMP_FIELD,
+ DataType::List(Arc::new(Field::new(
+ DEFAULT_LIST_FIELD_NAME,
+ DataType::Int64,
+ true,
+ ))),
+ false,
+ ),
+ ]
+ .into(),
+ )]),
+ )
+ }
+}
+
+impl DfAccumulator for GeoPathAccumulator {
+ fn update_batch(&mut self, values: &[ArrayRef]) -> datafusion::error::Result<()> {
+ if values.len() != 3 {
+ return Err(DataFusionError::Internal(format!(
+ "Expected 3 columns for geo_path, got {}",
+ values.len()
+ )));
+ }
+
+ let lat_array = as_primitive_array::<Float64Type>(&values[0])?;
+ let lng_array = as_primitive_array::<Float64Type>(&values[1])?;
+ let ts_array = as_primitive_array::<TimestampNanosecondType>(&values[2])?;
+
+ let size = lat_array.len();
+ self.lat.reserve(size);
+ self.lng.reserve(size);
+
+ for idx in 0..size {
+ self.lat.push(if lat_array.is_null(idx) {
+ None
+ } else {
+ Some(lat_array.value(idx))
+ });
+
+ self.lng.push(if lng_array.is_null(idx) {
+ None
+ } else {
+ Some(lng_array.value(idx))
+ });
+
+ self.timestamp.push(if ts_array.is_null(idx) {
+ None
+ } else {
+ Some(ts_array.value(idx))
+ });
+ }
+
+ Ok(())
+ }
+
+ fn evaluate(&mut self) -> DfResult<ScalarValue> {
+ let unordered_lng_array = Float64Array::from(self.lng.clone());
+ let unordered_lat_array = Float64Array::from(self.lat.clone());
+ let ts_array = Int64Array::from(self.timestamp.clone());
+
+ let ordered_indices = sort_to_indices(&ts_array, None, None)?;
+ let lat_array = compute::take(&unordered_lat_array, &ordered_indices, None)?;
+ let lng_array = compute::take(&unordered_lng_array, &ordered_indices, None)?;
+
+ let lat_list = Arc::new(SingleRowListArrayBuilder::new(lat_array).build_list_array());
+ let lng_list = Arc::new(SingleRowListArrayBuilder::new(lng_array).build_list_array());
+
+ let result = ScalarValue::Struct(Arc::new(StructArray::new(
+ vec![
+ Field::new(
+ LATITUDE_FIELD,
+ DataType::List(Arc::new(Field::new("item", DataType::Float64, true))),
+ false,
+ ),
+ Field::new(
+ LONGITUDE_FIELD,
+ DataType::List(Arc::new(Field::new("item", DataType::Float64, true))),
+ false,
+ ),
+ ]
+ .into(),
+ vec![lat_list, lng_list],
+ None,
+ )));
+
+ Ok(result)
+ }
+
+ fn size(&self) -> usize {
+ // Base size of GeoPathAccumulator struct fields
+ let mut total_size = std::mem::size_of::<Self>();
+
+ // Size of vectors (approximation)
+ total_size += self.lat.capacity() * std::mem::size_of::<Option<f64>>();
+ total_size += self.lng.capacity() * std::mem::size_of::<Option<f64>>();
+ total_size += self.timestamp.capacity() * std::mem::size_of::<Option<i64>>();
+
+ total_size
+ }
+
+ fn state(&mut self) -> datafusion::error::Result<Vec<ScalarValue>> {
+ let lat_array = Arc::new(ListArray::from_iter_primitive::<Float64Type, _, _>(vec![
+ Some(self.lat.clone()),
+ ]));
+ let lng_array = Arc::new(ListArray::from_iter_primitive::<Float64Type, _, _>(vec![
+ Some(self.lng.clone()),
+ ]));
+ let ts_array = Arc::new(ListArray::from_iter_primitive::<Int64Type, _, _>(vec![
+ Some(self.timestamp.clone()),
+ ]));
+
+ let state_struct = StructArray::new(
+ vec![
+ Field::new(
+ LATITUDE_FIELD,
+ DataType::List(Arc::new(Field::new("item", DataType::Float64, true))),
+ false,
+ ),
+ Field::new(
+ LONGITUDE_FIELD,
+ DataType::List(Arc::new(Field::new("item", DataType::Float64, true))),
+ false,
+ ),
+ Field::new(
+ TIMESTAMP_FIELD,
+ DataType::List(Arc::new(Field::new("item", DataType::Int64, true))),
+ false,
+ ),
+ ]
+ .into(),
+ vec![lat_array, lng_array, ts_array],
+ None,
+ );
+
+ Ok(vec![ScalarValue::Struct(Arc::new(state_struct))])
+ }
+
+ fn merge_batch(&mut self, states: &[ArrayRef]) -> datafusion::error::Result<()> {
+ if states.len() != 1 {
+ return Err(DataFusionError::Internal(format!(
+ "Expected 1 states for geo_path, got {}",
+ states.len()
+ )));
+ }
+
+ for state in states {
+ let state = as_struct_array(state)?;
+ let lat_list = as_list_array(state.column(0))?.value(0);
+ let lat_array = as_primitive_array::<Float64Type>(&lat_list)?;
+ let lng_list = as_list_array(state.column(1))?.value(0);
+ let lng_array = as_primitive_array::<Float64Type>(&lng_list)?;
+ let ts_list = as_list_array(state.column(2))?.value(0);
+ let ts_array = as_primitive_array::<Int64Type>(&ts_list)?;
+
+ self.lat.extend(lat_array);
+ self.lng.extend(lng_array);
+ self.timestamp.extend(ts_array);
+ }
+
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use datafusion::arrow::array::{Float64Array, TimestampNanosecondArray};
+ use datafusion::scalar::ScalarValue;
+
+ use super::*;
+
+ #[test]
+ fn test_geo_path_basic() {
+ let mut accumulator = GeoPathAccumulator::new();
+
+ // Create test data
+ let lat_array = Arc::new(Float64Array::from(vec![1.0, 2.0, 3.0]));
+ let lng_array = Arc::new(Float64Array::from(vec![4.0, 5.0, 6.0]));
+ let ts_array = Arc::new(TimestampNanosecondArray::from(vec![100, 200, 300]));
+
+ // Update batch
+ accumulator
+ .update_batch(&[lat_array, lng_array, ts_array])
+ .unwrap();
+
+ // Evaluate
+ let result = accumulator.evaluate().unwrap();
+ if let ScalarValue::Struct(struct_array) = result {
+ // Verify structure
+ let fields = struct_array.fields().clone();
+ assert_eq!(fields.len(), 2);
+ assert_eq!(fields[0].name(), LATITUDE_FIELD);
+ assert_eq!(fields[1].name(), LONGITUDE_FIELD);
+
+ // Verify data
+ let columns = struct_array.columns();
+ assert_eq!(columns.len(), 2);
+
+ // Check latitude values
+ let lat_list = as_list_array(&columns[0]).unwrap().value(0);
+ let lat_array = as_primitive_array::<Float64Type>(&lat_list).unwrap();
+ assert_eq!(lat_array.len(), 3);
+ assert_eq!(lat_array.value(0), 1.0);
+ assert_eq!(lat_array.value(1), 2.0);
+ assert_eq!(lat_array.value(2), 3.0);
+
+ // Check longitude values
+ let lng_list = as_list_array(&columns[1]).unwrap().value(0);
+ let lng_array = as_primitive_array::<Float64Type>(&lng_list).unwrap();
+ assert_eq!(lng_array.len(), 3);
+ assert_eq!(lng_array.value(0), 4.0);
+ assert_eq!(lng_array.value(1), 5.0);
+ assert_eq!(lng_array.value(2), 6.0);
+ } else {
+ panic!("Expected Struct scalar value");
+ }
+ }
+
+ #[test]
+ fn test_geo_path_sort_by_timestamp() {
+ let mut accumulator = GeoPathAccumulator::new();
+
+ // Create test data with unordered timestamps
+ let lat_array = Arc::new(Float64Array::from(vec![1.0, 2.0, 3.0]));
+ let lng_array = Arc::new(Float64Array::from(vec![4.0, 5.0, 6.0]));
+ let ts_array = Arc::new(TimestampNanosecondArray::from(vec![300, 100, 200]));
+
+ // Update batch
+ accumulator
+ .update_batch(&[lat_array, lng_array, ts_array])
+ .unwrap();
+
+ // Evaluate
+ let result = accumulator.evaluate().unwrap();
+ if let ScalarValue::Struct(struct_array) = result {
+ // Extract arrays
+ let columns = struct_array.columns();
+
+ // Check latitude values
+ let lat_list = as_list_array(&columns[0]).unwrap().value(0);
+ let lat_array = as_primitive_array::<Float64Type>(&lat_list).unwrap();
+ assert_eq!(lat_array.len(), 3);
+ assert_eq!(lat_array.value(0), 2.0); // timestamp 100
+ assert_eq!(lat_array.value(1), 3.0); // timestamp 200
+ assert_eq!(lat_array.value(2), 1.0); // timestamp 300
+
+ // Check longitude values (should be sorted by timestamp)
+ let lng_list = as_list_array(&columns[1]).unwrap().value(0);
+ let lng_array = as_primitive_array::<Float64Type>(&lng_list).unwrap();
+ assert_eq!(lng_array.len(), 3);
+ assert_eq!(lng_array.value(0), 5.0); // timestamp 100
+ assert_eq!(lng_array.value(1), 6.0); // timestamp 200
+ assert_eq!(lng_array.value(2), 4.0); // timestamp 300
+ } else {
+ panic!("Expected Struct scalar value");
+ }
+ }
+
+ #[test]
+ fn test_geo_path_merge() {
+ let mut accumulator1 = GeoPathAccumulator::new();
+ let mut accumulator2 = GeoPathAccumulator::new();
+
+ // Create test data for first accumulator
+ let lat_array1 = Arc::new(Float64Array::from(vec![1.0]));
+ let lng_array1 = Arc::new(Float64Array::from(vec![4.0]));
+ let ts_array1 = Arc::new(TimestampNanosecondArray::from(vec![100]));
+
+ // Create test data for second accumulator
+ let lat_array2 = Arc::new(Float64Array::from(vec![2.0]));
+ let lng_array2 = Arc::new(Float64Array::from(vec![5.0]));
+ let ts_array2 = Arc::new(TimestampNanosecondArray::from(vec![200]));
+
+ // Update batches
+ accumulator1
+ .update_batch(&[lat_array1, lng_array1, ts_array1])
+ .unwrap();
+ accumulator2
+ .update_batch(&[lat_array2, lng_array2, ts_array2])
+ .unwrap();
+
+ // Get states
+ let state1 = accumulator1.state().unwrap();
+ let state2 = accumulator2.state().unwrap();
+
+ // Create a merged accumulator
+ let mut merged = GeoPathAccumulator::new();
+
+ // Extract the struct arrays from the states
+ let state_array1 = match &state1[0] {
+ ScalarValue::Struct(array) => array.clone(),
+ _ => panic!("Expected Struct scalar value"),
+ };
+
+ let state_array2 = match &state2[0] {
+ ScalarValue::Struct(array) => array.clone(),
+ _ => panic!("Expected Struct scalar value"),
+ };
+
+ // Merge state arrays
+ merged.merge_batch(&[state_array1]).unwrap();
+ merged.merge_batch(&[state_array2]).unwrap();
+
+ // Evaluate merged result
+ let result = merged.evaluate().unwrap();
+ if let ScalarValue::Struct(struct_array) = result {
+ // Extract arrays
+ let columns = struct_array.columns();
+
+ // Check latitude values
+ let lat_list = as_list_array(&columns[0]).unwrap().value(0);
+ let lat_array = as_primitive_array::<Float64Type>(&lat_list).unwrap();
+ assert_eq!(lat_array.len(), 2);
+ assert_eq!(lat_array.value(0), 1.0); // timestamp 100
+ assert_eq!(lat_array.value(1), 2.0); // timestamp 200
+
+ // Check longitude values (should be sorted by timestamp)
+ let lng_list = as_list_array(&columns[1]).unwrap().value(0);
+ let lng_array = as_primitive_array::<Float64Type>(&lng_list).unwrap();
+ assert_eq!(lng_array.len(), 2);
+ assert_eq!(lng_array.value(0), 4.0); // timestamp 100
+ assert_eq!(lng_array.value(1), 5.0); // timestamp 200
+ } else {
+ panic!("Expected Struct scalar value");
+ }
+ }
+}
diff --git a/src/query/src/datafusion/planner.rs b/src/query/src/datafusion/planner.rs
index 13e95ee56098..912393690d54 100644
--- a/src/query/src/datafusion/planner.rs
+++ b/src/query/src/datafusion/planner.rs
@@ -19,7 +19,8 @@ use std::sync::Arc;
use arrow_schema::DataType;
use catalog::table_source::DfTableSourceProvider;
use common_function::aggr::{
- HllState, UddSketchState, HLL_MERGE_NAME, HLL_NAME, UDDSKETCH_STATE_NAME,
+ GeoPathAccumulator, HllState, UddSketchState, GEO_PATH_NAME, HLL_MERGE_NAME, HLL_NAME,
+ UDDSKETCH_STATE_NAME,
};
use common_function::scalars::udf::create_udf;
use common_query::logical_plan::create_aggregate_function;
@@ -167,12 +168,12 @@ impl ContextProvider for DfContextProviderAdapter {
fn get_aggregate_meta(&self, name: &str) -> Option<Arc<AggregateUDF>> {
if name == UDDSKETCH_STATE_NAME {
return Some(Arc::new(UddSketchState::udf_impl()));
- }
- if name == HLL_NAME {
+ } else if name == HLL_NAME {
return Some(Arc::new(HllState::state_udf_impl()));
- }
- if name == HLL_MERGE_NAME {
+ } else if name == HLL_MERGE_NAME {
return Some(Arc::new(HllState::merge_udf_impl()));
+ } else if name == GEO_PATH_NAME {
+ return Some(Arc::new(GeoPathAccumulator::udf_impl()));
}
self.engine_state.aggregate_function(name).map_or_else(
diff --git a/src/query/src/query_engine/default_serializer.rs b/src/query/src/query_engine/default_serializer.rs
index 63ae3ab4faa2..23d678986681 100644
--- a/src/query/src/query_engine/default_serializer.rs
+++ b/src/query/src/query_engine/default_serializer.rs
@@ -15,7 +15,7 @@
use std::sync::Arc;
use common_error::ext::BoxedError;
-use common_function::aggr::{HllState, UddSketchState};
+use common_function::aggr::{GeoPathAccumulator, HllState, UddSketchState};
use common_function::function_registry::FUNCTION_REGISTRY;
use common_function::scalars::udf::create_udf;
use common_query::error::RegisterUdfSnafu;
@@ -131,6 +131,7 @@ impl SubstraitPlanDecoder for DefaultPlanDecoder {
let _ = session_state.register_udaf(Arc::new(UddSketchState::udf_impl()));
let _ = session_state.register_udaf(Arc::new(HllState::state_udf_impl()));
let _ = session_state.register_udaf(Arc::new(HllState::merge_udf_impl()));
+ let _ = session_state.register_udaf(Arc::new(GeoPathAccumulator::udf_impl()));
}
let logical_plan = DFLogicalSubstraitConvertor
.decode(message, session_state)
diff --git a/tests/cases/standalone/common/function/geo.result b/tests/cases/standalone/common/function/geo.result
index b9ae2ba5806f..7b051a35ed04 100644
--- a/tests/cases/standalone/common/function/geo.result
+++ b/tests/cases/standalone/common/function/geo.result
@@ -333,15 +333,15 @@ FROM cell_cte;
| 9263763445276221387 | 808f7fc59ef01fcb | 30 | 9277415232383221760 |
+---------------------+---------------------------------+------------------------------+----------------------------------------+
-SELECT json_encode_path(37.76938, -122.3889, 1728083375::TimestampSecond);
+SELECT UNNEST(geo_path(37.76938, -122.3889, 1728083375::TimestampSecond));
-+----------------------------------------------------------------------------------------------------------------------+
-| json_encode_path(Float64(37.76938),Float64(-122.3889),arrow_cast(Int64(1728083375),Utf8("Timestamp(Second, None)"))) |
-+----------------------------------------------------------------------------------------------------------------------+
-| [[-122.3889,37.76938]] |
-+----------------------------------------------------------------------------------------------------------------------+
++--------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------+
+| unnest_placeholder(geo_path(Float64(37.76938),Float64(-122.3889),arrow_cast(Int64(1728083375),Utf8("Timestamp(Second, None)")))).lat | unnest_placeholder(geo_path(Float64(37.76938),Float64(-122.3889),arrow_cast(Int64(1728083375),Utf8("Timestamp(Second, None)")))).lng |
++--------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------+
+| [37.76938] | [-122.3889] |
++--------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------+
-SELECT json_encode_path(lat, lon, ts)
+SELECT UNNEST(geo_path(lat, lon, ts))
FROM(
SELECT 37.76938 AS lat, -122.3889 AS lon, 1728083375::TimestampSecond AS ts
UNION ALL
@@ -352,11 +352,11 @@ FROM(
SELECT 37.77001 AS lat, -122.3888 AS lon, 1728083372::TimestampSecond AS ts
);
-+-------------------------------------------------------------------------------------+
-| json_encode_path(lat,lon,ts) |
-+-------------------------------------------------------------------------------------+
-| [[-122.3888,37.77001],[-122.3839,37.76928],[-122.3889,37.76938],[-122.382,37.7693]] |
-+-------------------------------------------------------------------------------------+
++----------------------------------------------+----------------------------------------------+
+| unnest_placeholder(geo_path(lat,lon,ts)).lat | unnest_placeholder(geo_path(lat,lon,ts)).lng |
++----------------------------------------------+----------------------------------------------+
+| [37.77001, 37.76928, 37.76938, 37.7693] | [-122.3888, -122.3839, -122.3889, -122.382] |
++----------------------------------------------+----------------------------------------------+
SELECT wkt_point_from_latlng(37.76938, -122.3889) AS point;
diff --git a/tests/cases/standalone/common/function/geo.sql b/tests/cases/standalone/common/function/geo.sql
index fe424eb2287f..89bd1e6a4426 100644
--- a/tests/cases/standalone/common/function/geo.sql
+++ b/tests/cases/standalone/common/function/geo.sql
@@ -119,9 +119,9 @@ SELECT cell,
s2_cell_parent(cell, 3)
FROM cell_cte;
-SELECT json_encode_path(37.76938, -122.3889, 1728083375::TimestampSecond);
+SELECT UNNEST(geo_path(37.76938, -122.3889, 1728083375::TimestampSecond));
-SELECT json_encode_path(lat, lon, ts)
+SELECT UNNEST(geo_path(lat, lon, ts))
FROM(
SELECT 37.76938 AS lat, -122.3889 AS lon, 1728083375::TimestampSecond AS ts
UNION ALL
|
feat
|
rewrite `json_encode_path` to `geo_path` using compound type (#5640)
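Illustrative note (not part of the commit above): the key step in `GeoPathAccumulator::evaluate` is reordering the collected points by timestamp rather than by arrival order. A minimal standalone sketch of that step, using the arrow crate directly instead of the repo's `datatypes::compute` re-export, and with made-up sample values:

use arrow::array::{Float64Array, Int64Array};
use arrow::compute::{sort_to_indices, take};
use arrow::error::ArrowError;

fn main() -> Result<(), ArrowError> {
    // Points arrive out of order, as in test_geo_path_sort_by_timestamp above.
    let lat = Float64Array::from(vec![1.0, 2.0, 3.0]);
    let ts = Int64Array::from(vec![300, 100, 200]);

    // Indices that sort the timestamps ascending ...
    let indices = sort_to_indices(&ts, None, None)?;
    // ... are then used to reorder the coordinate column.
    let ordered = take(&lat, &indices, None)?;

    // Timestamps 100, 200, 300 correspond to latitudes 2.0, 3.0, 1.0.
    println!("{:?}", ordered);
    Ok(())
}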
|
22b5a94d0208a5fb518b747ed7decfe0d6b5f8de
|
2023-04-24 07:47:11
|
Weny Xu
|
feat: support creating the physical plan for JSON and CSV files (#1424)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 73ce408e5064..85cbbb10ff65 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2906,13 +2906,16 @@ version = "0.2.0"
dependencies = [
"async-trait",
"common-catalog",
+ "common-datasource",
"common-error",
"common-procedure",
"common-procedure-test",
"common-query",
+ "common-recordbatch",
"common-telemetry",
"common-test-util",
"common-time",
+ "datafusion",
"datatypes",
"futures",
"object-store",
diff --git a/src/file-table-engine/Cargo.toml b/src/file-table-engine/Cargo.toml
index 1e73b573cbbc..a69691a0b547 100644
--- a/src/file-table-engine/Cargo.toml
+++ b/src/file-table-engine/Cargo.toml
@@ -11,12 +11,15 @@ test = ["common-test-util"]
[dependencies]
async-trait = "0.1"
common-catalog = { path = "../common/catalog" }
+common-datasource = { path = "../common/datasource" }
common-error = { path = "../common/error" }
common-procedure = { path = "../common/procedure" }
common-procedure-test = { path = "../common/procedure-test" }
common-query = { path = "../common/query" }
+common-recordbatch = { path = "../common/recordbatch" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
+datafusion.workspace = true
datatypes = { path = "../datatypes" }
futures.workspace = true
object-store = { path = "../object-store" }
diff --git a/src/file-table-engine/src/engine/immutable.rs b/src/file-table-engine/src/engine/immutable.rs
index e37ca3e24ded..0a20faa1332f 100644
--- a/src/file-table-engine/src/engine/immutable.rs
+++ b/src/file-table-engine/src/engine/immutable.rs
@@ -341,7 +341,11 @@ impl EngineInner {
table_id, table_info
);
- let table = Arc::new(ImmutableFileTable::new(table_info, metadata));
+ let table = Arc::new(
+ ImmutableFileTable::new(table_info, metadata)
+ .map_err(BoxedError::new)
+ .context(table_error::TableOperationSnafu)?,
+ );
self.tables
.write()
diff --git a/src/file-table-engine/src/error.rs b/src/file-table-engine/src/error.rs
index c9dbfd4ddb4c..db172524e626 100644
--- a/src/file-table-engine/src/error.rs
+++ b/src/file-table-engine/src/error.rs
@@ -15,6 +15,7 @@
use std::any::Any;
use common_error::prelude::*;
+use datafusion::arrow::error::ArrowError;
use serde_json::error::Error as JsonError;
use snafu::Location;
use table::metadata::{TableInfoBuilderError, TableMetaBuilderError};
@@ -122,6 +123,48 @@ pub enum Error {
#[snafu(backtrace)]
source: datatypes::error::Error,
},
+
+ #[snafu(display("Missing required field: {}", name))]
+ MissingRequiredField { name: String, location: Location },
+
+ #[snafu(display("Failed to build backend, source: {}", source))]
+ BuildBackend {
+ #[snafu(backtrace)]
+ source: common_datasource::error::Error,
+ },
+
+ #[snafu(display("Unsupported file format: {}", format))]
+ UnsupportedFileFormat { format: String, location: Location },
+
+ #[snafu(display("Failed to build csv config: {}", source))]
+ BuildCsvConfig {
+ source: common_datasource::file_format::csv::CsvConfigBuilderError,
+ location: Location,
+ },
+
+ #[snafu(display("Failed to build stream: {}", source))]
+ BuildStream {
+ source: datafusion::error::DataFusionError,
+ location: Location,
+ },
+
+ #[snafu(display("Failed to project schema: {}", source))]
+ ProjectSchema {
+ source: ArrowError,
+ location: Location,
+ },
+
+ #[snafu(display("Failed to build stream adapter: {}", source))]
+ BuildStreamAdapter {
+ #[snafu(backtrace)]
+ source: common_recordbatch::error::Error,
+ },
+
+ #[snafu(display("Failed to parse file format: {}", source))]
+ ParseFileFormat {
+ #[snafu(backtrace)]
+ source: common_datasource::error::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -134,7 +177,15 @@ impl ErrorExt for Error {
TableExists { .. }
| BuildTableMeta { .. }
| BuildTableInfo { .. }
- | InvalidRawSchema { .. } => StatusCode::InvalidArguments,
+ | InvalidRawSchema { .. }
+ | UnsupportedFileFormat { .. }
+ | BuildCsvConfig { .. }
+ | ProjectSchema { .. }
+ | MissingRequiredField { .. } => StatusCode::InvalidArguments,
+
+ BuildBackend { source, .. } => source.status_code(),
+ BuildStreamAdapter { source, .. } => source.status_code(),
+ ParseFileFormat { source, .. } => source.status_code(),
WriteTableManifest { .. }
| DeleteTableManifest { .. }
@@ -145,7 +196,8 @@ impl ErrorExt for Error {
| DecodeJson { .. }
| ConvertRaw { .. }
| DropTable { .. }
- | WriteImmutableManifest { .. } => StatusCode::Unexpected,
+ | WriteImmutableManifest { .. }
+ | BuildStream { .. } => StatusCode::Unexpected,
}
}
diff --git a/src/file-table-engine/src/table.rs b/src/file-table-engine/src/table.rs
index 2eb5de637a1f..859c374d9a8c 100644
--- a/src/file-table-engine/src/table.rs
+++ b/src/file-table-engine/src/table.rs
@@ -12,4 +12,5 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+pub mod format;
pub mod immutable;
diff --git a/src/file-table-engine/src/table/format.rs b/src/file-table-engine/src/table/format.rs
new file mode 100644
index 000000000000..d99cb5ca2bc9
--- /dev/null
+++ b/src/file-table-engine/src/table/format.rs
@@ -0,0 +1,166 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use common_datasource::file_format::csv::{CsvConfigBuilder, CsvFormat, CsvOpener};
+use common_datasource::file_format::json::{JsonFormat, JsonOpener};
+use common_datasource::file_format::Format;
+use common_query::physical_plan::PhysicalPlanRef;
+use common_query::prelude::Expr;
+use common_recordbatch::adapter::RecordBatchStreamAdapter;
+use datafusion::arrow::datatypes::Schema;
+use datafusion::datasource::listing::PartitionedFile;
+use datafusion::datasource::object_store::ObjectStoreUrl;
+use datafusion::physical_plan::file_format::{FileOpener, FileScanConfig, FileStream};
+use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
+use datatypes::schema::SchemaRef;
+use object_store::ObjectStore;
+use snafu::ResultExt;
+use table::table::scan::SimpleTableScan;
+
+use crate::error::{self, Result};
+
+const DEFAULT_BATCH_SIZE: usize = 8192;
+
+#[derive(Debug, Clone, Copy, Default)]
+pub struct CreateScanPlanContext {}
+
+fn build_csv_opener(
+ file_schema: Arc<Schema>,
+ config: &ScanPlanConfig,
+ format: &CsvFormat,
+) -> Result<CsvOpener> {
+ let csv_config = CsvConfigBuilder::default()
+ .batch_size(DEFAULT_BATCH_SIZE)
+ .file_schema(file_schema)
+ .file_projection(config.projection.cloned())
+ .delimiter(format.delimiter)
+ .has_header(format.has_header)
+ .build()
+ .context(error::BuildCsvConfigSnafu)?;
+ Ok(CsvOpener::new(
+ csv_config,
+ config.store.clone(),
+ format.compression_type,
+ ))
+}
+
+fn build_json_opener(
+ file_schema: Arc<Schema>,
+ config: &ScanPlanConfig,
+ format: &JsonFormat,
+) -> Result<JsonOpener> {
+ let projected_schema = if let Some(projection) = config.projection {
+ Arc::new(
+ file_schema
+ .project(projection)
+ .context(error::ProjectSchemaSnafu)?,
+ )
+ } else {
+ file_schema
+ };
+ Ok(JsonOpener::new(
+ DEFAULT_BATCH_SIZE,
+ projected_schema,
+ config.store.clone(),
+ format.compression_type,
+ ))
+}
+
+fn build_scan_plan<T: FileOpener + Send + 'static>(
+ opener: T,
+ file_schema: Arc<Schema>,
+ files: &[String],
+ projection: Option<&Vec<usize>>,
+ limit: Option<usize>,
+) -> Result<PhysicalPlanRef> {
+ let stream = FileStream::new(
+ &FileScanConfig {
+ object_store_url: ObjectStoreUrl::parse("empty://").unwrap(), // won't be used
+ file_schema,
+ file_groups: vec![files
+ .iter()
+ .map(|filename| PartitionedFile::new(filename.to_string(), 0))
+ .collect::<Vec<_>>()],
+ statistics: Default::default(),
+ projection: projection.cloned(),
+ limit,
+ table_partition_cols: vec![],
+ output_ordering: None,
+ infinite_source: false,
+ },
+ 0, // partition: hard-code
+ opener,
+ &ExecutionPlanMetricsSet::new(),
+ )
+ .context(error::BuildStreamSnafu)?;
+ let adapter = RecordBatchStreamAdapter::try_new(Box::pin(stream))
+ .context(error::BuildStreamAdapterSnafu)?;
+ Ok(Arc::new(SimpleTableScan::new(Box::pin(adapter))))
+}
+
+fn new_csv_scan_plan(
+ _ctx: &CreateScanPlanContext,
+ config: &ScanPlanConfig,
+ format: &CsvFormat,
+) -> Result<PhysicalPlanRef> {
+ let file_schema = config.file_schema.arrow_schema().clone();
+ let opener = build_csv_opener(file_schema.clone(), config, format)?;
+ build_scan_plan(
+ opener,
+ file_schema,
+ config.files,
+ config.projection,
+ config.limit,
+ )
+}
+
+fn new_json_scan_plan(
+ _ctx: &CreateScanPlanContext,
+ config: &ScanPlanConfig,
+ format: &JsonFormat,
+) -> Result<PhysicalPlanRef> {
+ let file_schema = config.file_schema.arrow_schema().clone();
+ let opener = build_json_opener(file_schema.clone(), config, format)?;
+ build_scan_plan(
+ opener,
+ file_schema,
+ config.files,
+ config.projection,
+ config.limit,
+ )
+}
+
+#[derive(Debug, Clone)]
+pub struct ScanPlanConfig<'a> {
+ pub file_schema: SchemaRef,
+ pub files: &'a Vec<String>,
+ pub projection: Option<&'a Vec<usize>>,
+ pub filters: &'a [Expr],
+ pub limit: Option<usize>,
+ pub store: ObjectStore,
+}
+
+pub fn create_physical_plan(
+ format: &Format,
+ ctx: &CreateScanPlanContext,
+ config: &ScanPlanConfig,
+) -> Result<PhysicalPlanRef> {
+ match format {
+ Format::Csv(format) => new_csv_scan_plan(ctx, config, format),
+ Format::Json(format) => new_json_scan_plan(ctx, config, format),
+ Format::Parquet(_) => error::UnsupportedFileFormatSnafu { format: "parquet" }.fail(),
+ }
+}
diff --git a/src/file-table-engine/src/table/immutable.rs b/src/file-table-engine/src/table/immutable.rs
index 2fb7a3510719..c034858bcc36 100644
--- a/src/file-table-engine/src/table/immutable.rs
+++ b/src/file-table-engine/src/table/immutable.rs
@@ -16,26 +16,45 @@ use std::any::Any;
use std::sync::Arc;
use async_trait::async_trait;
+use common_datasource::file_format::Format;
+use common_datasource::object_store::build_backend;
+use common_error::prelude::BoxedError;
use common_query::physical_plan::PhysicalPlanRef;
use common_query::prelude::Expr;
use datatypes::schema::SchemaRef;
use object_store::ObjectStore;
-use snafu::ResultExt;
+use serde::{Deserialize, Serialize};
+use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionNumber;
-use table::error::Result as TableResult;
+use table::error::{self as table_error, Result as TableResult};
use table::metadata::{RawTableInfo, TableInfo, TableInfoRef, TableType};
use table::Table;
-use crate::error::{ConvertRawSnafu, Result};
+use super::format::{create_physical_plan, CreateScanPlanContext, ScanPlanConfig};
+use crate::error::{self, ConvertRawSnafu, Result};
use crate::manifest::immutable::{
read_table_manifest, write_table_manifest, ImmutableMetadata, INIT_META_VERSION,
};
use crate::manifest::table_manifest_dir;
+pub const IMMUTABLE_TABLE_META_KEY: &str = "IMMUTABLE_TABLE_META";
+pub const IMMUTABLE_TABLE_LOCATION_KEY: &str = "LOCATION";
+pub const IMMUTABLE_TABLE_PATTERN_KEY: &str = "PATTERN";
+pub const IMMUTABLE_TABLE_FORMAT_KEY: &str = "FORMAT";
+
+#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(default)]
+pub struct ImmutableFileTableOptions {
+ pub files: Vec<String>,
+}
+
pub struct ImmutableFileTable {
metadata: ImmutableMetadata,
// currently, it's immutable
table_info: Arc<TableInfo>,
+ object_store: ObjectStore,
+ files: Vec<String>,
+ format: Format,
}
pub type ImmutableFileTableRef = Arc<ImmutableFileTable>;
@@ -46,25 +65,40 @@ impl Table for ImmutableFileTable {
self
}
+ /// The [`SchemaRef`] before the projection.
+ /// It contains all the columns that may appear in the files (All missing columns should be filled NULLs).
fn schema(&self) -> SchemaRef {
self.table_info().meta.schema.clone()
}
- fn table_type(&self) -> TableType {
- self.table_info().table_type
- }
-
fn table_info(&self) -> TableInfoRef {
self.table_info.clone()
}
+ fn table_type(&self) -> TableType {
+ self.table_info().table_type
+ }
+
async fn scan(
&self,
- _projection: Option<&Vec<usize>>,
- _filters: &[Expr],
- _limit: Option<usize>,
+ projection: Option<&Vec<usize>>,
+ filters: &[Expr],
+ limit: Option<usize>,
) -> TableResult<PhysicalPlanRef> {
- todo!()
+ create_physical_plan(
+ &self.format,
+ &CreateScanPlanContext::default(),
+ &ScanPlanConfig {
+ file_schema: self.schema(),
+ files: &self.files,
+ projection,
+ filters,
+ limit,
+ store: self.object_store.clone(),
+ },
+ )
+ .map_err(BoxedError::new)
+ .context(table_error::TableOperationSnafu)
}
async fn flush(
@@ -87,11 +121,36 @@ impl ImmutableFileTable {
&self.metadata
}
- pub(crate) fn new(table_info: TableInfo, metadata: ImmutableMetadata) -> Self {
- Self {
+ pub(crate) fn new(table_info: TableInfo, metadata: ImmutableMetadata) -> Result<Self> {
+ let table_info = Arc::new(table_info);
+ let options = &table_info.meta.options.extra_options;
+
+ let url = options.get(IMMUTABLE_TABLE_LOCATION_KEY).context(
+ error::MissingRequiredFieldSnafu {
+ name: IMMUTABLE_TABLE_LOCATION_KEY,
+ },
+ )?;
+
+ let meta =
+ options
+ .get(IMMUTABLE_TABLE_META_KEY)
+ .context(error::MissingRequiredFieldSnafu {
+ name: IMMUTABLE_TABLE_META_KEY,
+ })?;
+
+ let meta: ImmutableFileTableOptions =
+ serde_json::from_str(meta).context(error::DecodeJsonSnafu)?;
+ let format = Format::try_from(options).context(error::ParseFileFormatSnafu)?;
+
+ let object_store = build_backend(url, options).context(error::BuildBackendSnafu)?;
+
+ Ok(Self {
metadata,
- table_info: Arc::new(table_info),
- }
+ table_info,
+ object_store,
+ files: meta.files,
+ format,
+ })
}
pub async fn create(
@@ -113,7 +172,7 @@ impl ImmutableFileTable {
)
.await?;
- Ok(ImmutableFileTable::new(table_info, metadata))
+ ImmutableFileTable::new(table_info, metadata)
}
pub(crate) async fn recover_table_info(
diff --git a/src/file-table-engine/src/test_util.rs b/src/file-table-engine/src/test_util.rs
index e568763d7054..96923d78aed1 100644
--- a/src/file-table-engine/src/test_util.rs
+++ b/src/file-table-engine/src/test_util.rs
@@ -28,6 +28,7 @@ use table::TableRef;
use crate::config::EngineConfig;
use crate::engine::immutable::ImmutableFileTableEngine;
use crate::manifest::immutable::ImmutableMetadata;
+use crate::table::immutable::{self, ImmutableFileTableOptions};
pub const TEST_TABLE_NAME: &str = "demo";
@@ -95,6 +96,20 @@ pub struct TestEngineComponents {
}
pub fn new_create_request(schema: SchemaRef) -> CreateTableRequest {
+ let mut table_options = TableOptions::default();
+ table_options.extra_options.insert(
+ immutable::IMMUTABLE_TABLE_LOCATION_KEY.to_string(),
+ "mock_path".to_string(),
+ );
+ table_options.extra_options.insert(
+ immutable::IMMUTABLE_TABLE_META_KEY.to_string(),
+ serde_json::to_string(&ImmutableFileTableOptions::default()).unwrap(),
+ );
+ table_options.extra_options.insert(
+ immutable::IMMUTABLE_TABLE_FORMAT_KEY.to_string(),
+ "csv".to_string(),
+ );
+
CreateTableRequest {
id: 1,
catalog_name: "greptime".to_string(),
@@ -105,7 +120,7 @@ pub fn new_create_request(schema: SchemaRef) -> CreateTableRequest {
region_numbers: vec![0],
create_if_not_exists: true,
primary_key_indices: vec![0],
- table_options: TableOptions::default(),
+ table_options,
engine: IMMUTABLE_FILE_ENGINE.to_string(),
}
}
|
feat
|
support creating the physical plan for JSON and CSV files (#1424)
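Illustrative note (not part of the commit above): read together, the new `ImmutableFileTable::new` and `test_util.rs` show the table being driven entirely by three `extra_options` entries. A hedged sketch of such a map follows; the key strings match the constants added in the diff, while the location and file name are placeholder values, not taken from the commit.

use std::collections::HashMap;

fn demo_extra_options() -> HashMap<String, String> {
    let mut opts = HashMap::new();
    // IMMUTABLE_TABLE_LOCATION_KEY: where build_backend points the object store (placeholder path).
    opts.insert("LOCATION".to_string(), "/tmp/demo/".to_string());
    // IMMUTABLE_TABLE_FORMAT_KEY: picked up by Format::try_from, here CSV.
    opts.insert("FORMAT".to_string(), "csv".to_string());
    // IMMUTABLE_TABLE_META_KEY: serialized ImmutableFileTableOptions listing the files (placeholder file).
    opts.insert(
        "IMMUTABLE_TABLE_META".to_string(),
        r#"{"files":["demo.csv"]}"#.to_string(),
    );
    opts
}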
|
584acca09d8f1755d46c6832cd6946d2f4fa404c
|
2023-08-11 12:34:42
|
Zou Wei
|
feat: impl duration type (#2117)
| false
|
diff --git a/src/common/time/src/duration.rs b/src/common/time/src/duration.rs
new file mode 100644
index 000000000000..5689423e1276
--- /dev/null
+++ b/src/common/time/src/duration.rs
@@ -0,0 +1,414 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::cmp::Ordering;
+use std::fmt::{Display, Formatter};
+use std::hash::{Hash, Hasher};
+
+use serde::{Deserialize, Serialize};
+
+use crate::timestamp::TimeUnit;
+
+/// [Duration] represents the elapsed time in either seconds, milliseconds, microseconds or nanoseconds.
+#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
+pub struct Duration {
+ value: i64,
+ unit: TimeUnit,
+}
+
+impl Duration {
+ /// Create a new Duration with value and TimeUnit.
+ pub fn new(value: i64, unit: TimeUnit) -> Self {
+ Self { value, unit }
+ }
+
+ /// Create a new Duration in second.
+ pub fn new_second(value: i64) -> Self {
+ Self {
+ value,
+ unit: TimeUnit::Second,
+ }
+ }
+
+ /// Create a new Duration in millisecond.
+ pub fn new_millisecond(value: i64) -> Self {
+ Self {
+ value,
+ unit: TimeUnit::Millisecond,
+ }
+ }
+
+ /// Create a new Duration in microsecond.
+ pub fn new_microsecond(value: i64) -> Self {
+ Self {
+ value,
+ unit: TimeUnit::Microsecond,
+ }
+ }
+
+ /// Create a new Duration in nanosecond.
+ pub fn new_nanosecond(value: i64) -> Self {
+ Self {
+ value,
+ unit: TimeUnit::Nanosecond,
+ }
+ }
+
+ /// Return the TimeUnit of current Duration.
+ pub fn unit(&self) -> TimeUnit {
+ self.unit
+ }
+
+ /// Return the value of current Duration.
+ pub fn value(&self) -> i64 {
+ self.value
+ }
+
+    /// Split a [Duration] into a seconds part and a nanoseconds part.
+    /// Note that the seconds part is always rounded down (floored).
+ fn split(&self) -> (i64, u32) {
+ let sec_mul = (TimeUnit::Second.factor() / self.unit.factor()) as i64;
+ let nsec_mul = (self.unit.factor() / TimeUnit::Nanosecond.factor()) as i64;
+
+ let sec_div = self.value.div_euclid(sec_mul);
+ let sec_mod = self.value.rem_euclid(sec_mul);
+ // safety: the max possible value of `sec_mod` is 999,999,999
+ let nsec = u32::try_from(sec_mod * nsec_mul).unwrap();
+ (sec_div, nsec)
+ }
+}
+
+/// Convert an i64 into a Duration.
+/// The default TimeUnit is Millisecond.
+impl From<i64> for Duration {
+ fn from(v: i64) -> Self {
+ Self {
+ value: v,
+ unit: TimeUnit::Millisecond,
+ }
+ }
+}
+
+/// Return the i64 value of the Duration.
+impl From<Duration> for i64 {
+ fn from(d: Duration) -> Self {
+ d.value
+ }
+}
+
+/// Convert a std::time::Duration into a common_time::Duration.
+/// The range of std::time::Duration is [0, u64::MAX seconds + 999_999_999 nanoseconds],
+/// while the range of common_time::Duration is [i64::MIN, i64::MAX] with a TimeUnit.
+/// If the std::time::Duration does not fit, the conversion falls back to a coarser
+/// TimeUnit, saturating at i64::MAX seconds.
+impl From<std::time::Duration> for Duration {
+ fn from(d: std::time::Duration) -> Self {
+ // convert as high-precision as possible
+ let value = d.as_nanos();
+ if value <= i64::MAX as u128 {
+ return Self {
+ value: value as i64,
+ unit: TimeUnit::Nanosecond,
+ };
+ }
+
+ let value = d.as_micros();
+ if value <= i64::MAX as u128 {
+ return Self {
+ value: value as i64,
+ unit: TimeUnit::Microsecond,
+ };
+ }
+
+ let value = d.as_millis();
+ if value <= i64::MAX as u128 {
+ return Self {
+ value: value as i64,
+ unit: TimeUnit::Millisecond,
+ };
+ }
+
+ let value = d.as_secs();
+ if value <= i64::MAX as u64 {
+ return Self {
+ value: value as i64,
+ unit: TimeUnit::Second,
+ };
+ }
+
+ // overflow, return the max of common_time::Duration
+ Self {
+ value: i64::MAX,
+ unit: TimeUnit::Second,
+ }
+ }
+}
+
+impl From<Duration> for std::time::Duration {
+ fn from(d: Duration) -> Self {
+ if d.value < 0 {
+ return std::time::Duration::new(0, 0);
+ }
+ match d.unit {
+ TimeUnit::Nanosecond => std::time::Duration::from_nanos(d.value as u64),
+ TimeUnit::Microsecond => std::time::Duration::from_micros(d.value as u64),
+ TimeUnit::Millisecond => std::time::Duration::from_millis(d.value as u64),
+ TimeUnit::Second => std::time::Duration::from_secs(d.value as u64),
+ }
+ }
+}
+
+impl From<Duration> for serde_json::Value {
+ fn from(d: Duration) -> Self {
+ serde_json::Value::String(d.to_string())
+ }
+}
+
+impl PartialOrd for Duration {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+/// Duration is orderable.
+impl Ord for Duration {
+ fn cmp(&self, other: &Self) -> Ordering {
+ // fast path: most comparisons use the same unit.
+ if self.unit == other.unit {
+ return self.value.cmp(&other.value);
+ }
+
+ let (s_sec, s_nsec) = self.split();
+ let (o_sec, o_nsec) = other.split();
+ match s_sec.cmp(&o_sec) {
+ Ordering::Less => Ordering::Less,
+ Ordering::Greater => Ordering::Greater,
+ Ordering::Equal => s_nsec.cmp(&o_nsec),
+ }
+ }
+}
+
+impl Display for Duration {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{}{}", self.value, self.unit.short_name())
+ }
+}
+
+impl PartialEq for Duration {
+ fn eq(&self, other: &Self) -> bool {
+ self.cmp(other) == Ordering::Equal
+ }
+}
+
+impl Eq for Duration {}
+
+impl Hash for Duration {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ let (sec, nsec) = self.split();
+ state.write_i64(sec);
+ state.write_u32(nsec);
+ }
+}
+
+#[cfg(test)]
+mod tests {
+
+ use std::collections::hash_map::DefaultHasher;
+ use std::hash::{Hash, Hasher};
+
+ use crate::timestamp::TimeUnit;
+ use crate::Duration;
+
+ #[test]
+ fn test_duration() {
+ let d = Duration::new(1, TimeUnit::Second);
+ assert_eq!(TimeUnit::Second, d.unit());
+ assert_eq!(1, d.value());
+ assert_eq!(Duration::new(1000, TimeUnit::Millisecond), d);
+ assert!(d > Duration::new(999, TimeUnit::Millisecond));
+ assert!(d < Duration::new(1001, TimeUnit::Millisecond));
+ }
+
+ #[test]
+ fn test_cmp_duration() {
+ let d1 = Duration::new(1, TimeUnit::Second);
+ let d2 = Duration::new(1, TimeUnit::Millisecond);
+ assert!(d1 > d2);
+
+ let d1 = Duration::new(1, TimeUnit::Second);
+ let d2 = Duration::new(1, TimeUnit::Microsecond);
+ assert!(d1 > d2);
+
+ let d1 = Duration::new(1, TimeUnit::Second);
+ let d2 = Duration::new(1_000_000_001, TimeUnit::Nanosecond);
+ assert!(d1 < d2);
+
+ let d1 = Duration::new(100, TimeUnit::Millisecond);
+ let d2 = Duration::new(1_000_001, TimeUnit::Microsecond);
+ assert!(d1 < d2);
+
+ let d1 = Duration::new(i64::MAX / 1000, TimeUnit::Second);
+ let d2 = Duration::new(i64::MAX / 1000 * 1000, TimeUnit::Millisecond);
+ assert!(d1 == d2);
+
+ let d1 = Duration::new(i64::MAX / 1000 + 1, TimeUnit::Second);
+ let d2 = Duration::new(i64::MAX / 1000 * 1000, TimeUnit::Millisecond);
+ assert!(d1 > d2);
+
+ let d1 = Duration::new(-100, TimeUnit::Millisecond);
+ let d2 = Duration::new(-100 * 999, TimeUnit::Microsecond);
+ assert!(d1 < d2);
+
+ let d1 = Duration::new(i64::MIN / 1000, TimeUnit::Millisecond);
+ let d2 = Duration::new(i64::MIN / 1000 * 1000, TimeUnit::Microsecond);
+ assert!(d1 == d2);
+ }
+
+ #[test]
+ fn test_convert_i64() {
+ let t = Duration::from(1);
+ assert_eq!(TimeUnit::Millisecond, t.unit());
+ assert_eq!(1, t.value());
+
+ let i: i64 = t.into();
+ assert_eq!(1, i);
+ }
+
+ #[test]
+ fn test_hash() {
+ let check_hash_eq = |d1: Duration, d2: Duration| {
+ let mut hasher = DefaultHasher::new();
+ d1.hash(&mut hasher);
+ let d1_hash = hasher.finish();
+
+ let mut hasher = DefaultHasher::new();
+ d2.hash(&mut hasher);
+ let d2_hash = hasher.finish();
+ d1_hash == d2_hash
+ };
+
+ let d1 = Duration::new(1, TimeUnit::Second);
+ let d2 = Duration::new(1, TimeUnit::Second);
+ assert!(check_hash_eq(d1, d2));
+
+ let d1 = Duration::new(1, TimeUnit::Second);
+ let d2 = Duration::new(1000, TimeUnit::Millisecond);
+ assert!(check_hash_eq(d1, d2));
+
+ let d1 = Duration::new(1, TimeUnit::Second);
+ let d2 = Duration::new(1_000_000, TimeUnit::Microsecond);
+ assert!(check_hash_eq(d1, d2));
+
+ let d1 = Duration::new(1, TimeUnit::Second);
+ let d2 = Duration::new(1_000_000_000, TimeUnit::Nanosecond);
+ assert!(check_hash_eq(d1, d2));
+
+ // not equal
+ let d1 = Duration::new(1, TimeUnit::Second);
+ let d2 = Duration::new(2, TimeUnit::Second);
+ assert!(!check_hash_eq(d1, d2));
+ }
+
+ #[test]
+ fn test_duration_to_string() {
+ let d = Duration::new(1, TimeUnit::Second);
+ assert_eq!("1s", d.to_string());
+
+ let d = Duration::new(2, TimeUnit::Millisecond);
+ assert_eq!("2ms", d.to_string());
+
+ let d = Duration::new(3, TimeUnit::Microsecond);
+ assert_eq!("3us", d.to_string());
+
+ let d = Duration::new(4, TimeUnit::Nanosecond);
+ assert_eq!("4ns", d.to_string());
+ }
+
+ #[test]
+ fn test_serialize_to_json_value() {
+ let d = Duration::new(1, TimeUnit::Second);
+ let json_value = serde_json::to_value(d).unwrap();
+ assert_eq!(
+ json_value,
+ serde_json::json!({"value": 1, "unit": "Second"})
+ );
+
+ let d = Duration::new(1, TimeUnit::Millisecond);
+ let json_value = serde_json::to_value(d).unwrap();
+ assert_eq!(
+ json_value,
+ serde_json::json!({"value": 1, "unit": "Millisecond"})
+ );
+ }
+
+ #[test]
+ fn test_convert_with_std_duration() {
+ // normal test
+ let std_duration = std::time::Duration::new(0, 0);
+ let duration = Duration::from(std_duration);
+ assert_eq!(duration, Duration::new(0, TimeUnit::Nanosecond));
+
+ let std_duration = std::time::Duration::new(1, 0);
+ let duration = Duration::from(std_duration);
+ assert_eq!(duration, Duration::new(1_000_000_000, TimeUnit::Nanosecond));
+
+ let std_duration = std::time::Duration::from_nanos(i64::MAX as u64);
+ let duration = Duration::from(std_duration);
+ assert_eq!(duration, Duration::new(i64::MAX, TimeUnit::Nanosecond));
+
+ let std_duration = std::time::Duration::from_nanos(i64::MAX as u64 + 1);
+ let duration = Duration::from(std_duration);
+ assert_eq!(
+ duration,
+ Duration::new(i64::MAX / 1000, TimeUnit::Microsecond)
+ );
+
+ let std_duration = std::time::Duration::from_nanos(u64::MAX);
+ let duration = Duration::from(std_duration);
+ assert_eq!(
+ duration,
+ Duration::new(18446744073709551, TimeUnit::Microsecond)
+ );
+
+ let std_duration =
+ std::time::Duration::new(i64::MAX as u64 / 1_000, (i64::MAX % 1_000 * 1_000) as u32);
+ let duration = Duration::from(std_duration);
+ assert_eq!(
+ duration,
+ Duration::new(9223372036854775000, TimeUnit::Millisecond)
+ );
+
+ let std_duration = std::time::Duration::new(i64::MAX as u64, 0);
+ let duration = Duration::from(std_duration);
+ assert_eq!(duration, Duration::new(i64::MAX, TimeUnit::Second));
+
+ // max std::time::Duration
+ let std_duration = std::time::Duration::MAX;
+ let duration = Duration::from(std_duration);
+ assert_eq!(
+ duration,
+ Duration::new(9223372036854775807, TimeUnit::Second)
+ );
+
+ // overflow test
+ let std_duration = std::time::Duration::new(i64::MAX as u64, 1);
+ let duration = Duration::from(std_duration);
+ assert_eq!(duration, Duration::new(i64::MAX, TimeUnit::Second));
+
+ // convert back to std::time::Duration
+ let duration = Duration::new(0, TimeUnit::Nanosecond);
+ let std_duration = std::time::Duration::from(duration);
+ assert_eq!(std_duration, std::time::Duration::new(0, 0));
+ }
+}
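The cross-unit comparison above works by normalizing every value to whole seconds (floored via `div_euclid`) plus a nanosecond remainder, which is what `split()` computes. A self-contained, std-only sketch of that strategy; `nanos_per_unit` is a hypothetical helper standing in for `TimeUnit::factor()`:

```rust
use std::cmp::Ordering;

#[derive(Clone, Copy)]
enum Unit {
    Second,
    Millisecond,
    Microsecond,
    Nanosecond,
}

fn nanos_per_unit(unit: Unit) -> i64 {
    match unit {
        Unit::Second => 1_000_000_000,
        Unit::Millisecond => 1_000_000,
        Unit::Microsecond => 1_000,
        Unit::Nanosecond => 1,
    }
}

// Split `value` of `unit` into whole seconds (floored) and a nanosecond remainder.
fn split(value: i64, unit: Unit) -> (i64, u32) {
    let units_per_sec = 1_000_000_000 / nanos_per_unit(unit);
    let sec = value.div_euclid(units_per_sec);
    // The remainder lies in [0, units_per_sec), so scaled to nanoseconds it is
    // at most 999_999_999 and fits in u32.
    let nsec = (value.rem_euclid(units_per_sec) * nanos_per_unit(unit)) as u32;
    (sec, nsec)
}

fn cmp_durations(a: (i64, Unit), b: (i64, Unit)) -> Ordering {
    let (a_sec, a_nsec) = split(a.0, a.1);
    let (b_sec, b_nsec) = split(b.0, b.1);
    a_sec.cmp(&b_sec).then(a_nsec.cmp(&b_nsec))
}

fn main() {
    assert_eq!(
        cmp_durations((1, Unit::Second), (1_000, Unit::Millisecond)),
        Ordering::Equal
    );
    assert_eq!(
        cmp_durations((1, Unit::Second), (1_000_001, Unit::Microsecond)),
        Ordering::Less
    );
    assert_eq!(
        cmp_durations((1, Unit::Second), (999_999_999, Unit::Nanosecond)),
        Ordering::Greater
    );
    // Flooring keeps negative values consistent: -1 ms is (-1 s, 999_000_000 ns).
    assert_eq!(split(-1, Unit::Millisecond), (-1, 999_000_000));
    println!("split-and-compare sketch OK");
}
```

Flooring is the reason negative values hash and compare consistently across units: the nanosecond remainder is always non-negative.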
diff --git a/src/common/time/src/lib.rs b/src/common/time/src/lib.rs
index 9cd61cf6fb8b..4a47c212dc11 100644
--- a/src/common/time/src/lib.rs
+++ b/src/common/time/src/lib.rs
@@ -14,6 +14,7 @@
pub mod date;
pub mod datetime;
+pub mod duration;
pub mod error;
pub mod interval;
pub mod range;
@@ -25,6 +26,7 @@ pub mod util;
pub use date::Date;
pub use datetime::DateTime;
+pub use duration::Duration;
pub use interval::Interval;
pub use range::RangeMillis;
pub use timestamp::Timestamp;
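The `From<std::time::Duration>` impl above keeps as much precision as will fit in an i64: nanoseconds first, then microseconds, milliseconds, and seconds, saturating at i64::MAX seconds. A std-only sketch of that fallback, returning a plain (value, unit) pair instead of the real Duration:

```rust
// Keep the finest unit whose count still fits in i64, otherwise saturate to
// i64::MAX seconds.
fn to_i64_duration(d: std::time::Duration) -> (i64, &'static str) {
    if let Ok(ns) = i64::try_from(d.as_nanos()) {
        return (ns, "ns");
    }
    if let Ok(us) = i64::try_from(d.as_micros()) {
        return (us, "us");
    }
    if let Ok(ms) = i64::try_from(d.as_millis()) {
        return (ms, "ms");
    }
    match i64::try_from(d.as_secs()) {
        Ok(s) => (s, "s"),
        Err(_) => (i64::MAX, "s"),
    }
}

fn main() {
    assert_eq!(
        to_i64_duration(std::time::Duration::from_secs(1)),
        (1_000_000_000, "ns")
    );
    // u64::MAX nanoseconds no longer fits in i64 nanoseconds, so it degrades to
    // microseconds, matching the u64::MAX test case in the diff above.
    assert_eq!(
        to_i64_duration(std::time::Duration::from_nanos(u64::MAX)),
        (18_446_744_073_709_551, "us")
    );
    println!("fallback sketch OK");
}
```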
|
feat
|
impl duration type (#2117)
|
87a730658a70b860acdd5be69640d6519349d266
|
2023-08-17 20:49:14
|
Zhenchi
|
refactor: add `ThinTable` to proxy tables from infoschema (#2193)
| false
|
diff --git a/src/catalog/src/information_schema.rs b/src/catalog/src/information_schema.rs
index 699914c1a258..90f81e45843c 100644
--- a/src/catalog/src/information_schema.rs
+++ b/src/catalog/src/information_schema.rs
@@ -15,25 +15,23 @@
mod columns;
mod tables;
-use std::any::Any;
use std::collections::HashMap;
use std::sync::{Arc, Weak};
-use async_trait::async_trait;
-use common_catalog::consts::{
- INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME,
- INFORMATION_SCHEMA_TABLES_TABLE_ID,
-};
+use common_catalog::consts::INFORMATION_SCHEMA_NAME;
use common_error::ext::BoxedError;
use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
use datatypes::schema::SchemaRef;
use futures_util::StreamExt;
use snafu::ResultExt;
-use store_api::data_source::{DataSource, TableFactory};
+use store_api::data_source::DataSource;
use store_api::storage::{ScanRequest, TableId};
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
-use table::metadata::{TableIdent, TableInfoBuilder, TableMetaBuilder, TableType};
-use table::{Result as TableResult, Table, TableRef};
+use table::metadata::{
+ FilterPushDownType, TableIdent, TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType,
+};
+use table::thin_table::{ThinTable, ThinTableAdapter};
+use table::TableRef;
use self::columns::InformationSchemaColumns;
use crate::error::Result;
@@ -62,167 +60,90 @@ impl InformationSchemaProvider {
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
) -> HashMap<String, TableRef> {
- let mut schema = HashMap::new();
-
- schema.insert(
- TABLES.to_string(),
- Arc::new(InformationTable::new(
- catalog_name.clone(),
- INFORMATION_SCHEMA_TABLES_TABLE_ID,
- TABLES.to_string(),
- Arc::new(InformationSchemaTables::new(
- catalog_name.clone(),
- catalog_manager.clone(),
- )),
- )) as _,
- );
- schema.insert(
- COLUMNS.to_string(),
- Arc::new(InformationTable::new(
- catalog_name.clone(),
- INFORMATION_SCHEMA_COLUMNS_TABLE_ID,
- COLUMNS.to_string(),
- Arc::new(InformationSchemaColumns::new(catalog_name, catalog_manager)),
- )) as _,
- );
+ let provider = Self::new(catalog_name, catalog_manager);
+ let mut schema = HashMap::new();
+ schema.insert(TABLES.to_owned(), provider.table(TABLES).unwrap());
+ schema.insert(COLUMNS.to_owned(), provider.table(COLUMNS).unwrap());
schema
}
- pub fn table(&self, name: &str) -> Result<Option<TableRef>> {
- let (stream_builder, table_id) = match name.to_ascii_lowercase().as_ref() {
- TABLES => (
- Arc::new(InformationSchemaTables::new(
- self.catalog_name.clone(),
- self.catalog_manager.clone(),
- )) as _,
- INFORMATION_SCHEMA_TABLES_TABLE_ID,
- ),
- COLUMNS => (
- Arc::new(InformationSchemaColumns::new(
- self.catalog_name.clone(),
- self.catalog_manager.clone(),
- )) as _,
- INFORMATION_SCHEMA_COLUMNS_TABLE_ID,
- ),
- _ => {
- return Ok(None);
- }
- };
-
- Ok(Some(Arc::new(InformationTable::new(
- self.catalog_name.clone(),
- table_id,
- name.to_string(),
- stream_builder,
- ))))
+ pub fn table(&self, name: &str) -> Option<TableRef> {
+ self.information_table(name).map(|table| {
+ let schema = table.schema();
+ let table_info = Self::table_info(self.catalog_name.clone(), &table);
+ let table_type = table.table_type();
+ let data_source = Arc::new(InformationTableDataSource::new(table));
+ let filter_pushdown = FilterPushDownType::Unsupported;
+ let thin_table = ThinTable::new(schema, table_info, table_type, filter_pushdown);
+ Arc::new(ThinTableAdapter::new(thin_table, data_source)) as _
+ })
}
- pub fn table_factory(&self, name: &str) -> Result<Option<TableFactory>> {
- let (stream_builder, table_id) = match name.to_ascii_lowercase().as_ref() {
- TABLES => (
- Arc::new(InformationSchemaTables::new(
- self.catalog_name.clone(),
- self.catalog_manager.clone(),
- )) as _,
- INFORMATION_SCHEMA_TABLES_TABLE_ID,
- ),
- COLUMNS => (
- Arc::new(InformationSchemaColumns::new(
- self.catalog_name.clone(),
- self.catalog_manager.clone(),
- )) as _,
- INFORMATION_SCHEMA_COLUMNS_TABLE_ID,
- ),
- _ => {
- return Ok(None);
- }
- };
- let data_source = Arc::new(InformationTable::new(
- self.catalog_name.clone(),
- table_id,
- name.to_string(),
- stream_builder,
- ));
-
- Ok(Some(Arc::new(move || data_source.clone())))
- }
-}
-
-// TODO(ruihang): make it a more generic trait:
-// https://github.com/GreptimeTeam/greptimedb/pull/1639#discussion_r1205001903
-pub trait InformationStreamBuilder: Send + Sync {
- fn to_stream(&self) -> Result<SendableRecordBatchStream>;
-
- fn schema(&self) -> SchemaRef;
-}
-
-pub struct InformationTable {
- catalog_name: String,
- table_id: TableId,
- name: String,
- stream_builder: Arc<dyn InformationStreamBuilder>,
-}
-
-impl InformationTable {
- pub fn new(
- catalog_name: String,
- table_id: TableId,
- name: String,
- stream_builder: Arc<dyn InformationStreamBuilder>,
- ) -> Self {
- Self {
- catalog_name,
- table_id,
- name,
- stream_builder,
+ fn information_table(&self, name: &str) -> Option<InformationTableRef> {
+ match name.to_ascii_lowercase().as_str() {
+ TABLES => Some(Arc::new(InformationSchemaTables::new(
+ self.catalog_name.clone(),
+ self.catalog_manager.clone(),
+ )) as _),
+ COLUMNS => Some(Arc::new(InformationSchemaColumns::new(
+ self.catalog_name.clone(),
+ self.catalog_manager.clone(),
+ )) as _),
+ _ => None,
}
}
-}
-
-#[async_trait]
-impl Table for InformationTable {
- fn as_any(&self) -> &dyn Any {
- self
- }
- fn schema(&self) -> SchemaRef {
- self.stream_builder.schema()
- }
-
- fn table_info(&self) -> table::metadata::TableInfoRef {
+ fn table_info(catalog_name: String, table: &InformationTableRef) -> TableInfoRef {
let table_meta = TableMetaBuilder::default()
- .schema(self.stream_builder.schema())
+ .schema(table.schema())
.primary_key_indices(vec![])
.next_column_id(0)
.build()
.unwrap();
- Arc::new(
- TableInfoBuilder::default()
- .ident(TableIdent {
- table_id: self.table_id,
- version: 0,
- })
- .name(self.name.clone())
- .catalog_name(self.catalog_name.clone())
- .schema_name(INFORMATION_SCHEMA_NAME.to_string())
- .meta(table_meta)
- .table_type(TableType::Temporary)
- .build()
- .unwrap(),
- )
+ let table_info = TableInfoBuilder::default()
+ .ident(TableIdent {
+ table_id: table.table_id(),
+ version: 0,
+ })
+ .name(table.table_name().to_owned())
+ .catalog_name(catalog_name)
+ .schema_name(INFORMATION_SCHEMA_NAME.to_owned())
+ .meta(table_meta)
+ .table_type(table.table_type())
+ .build()
+ .unwrap();
+ Arc::new(table_info)
}
+}
+
+trait InformationTable {
+ fn table_id(&self) -> TableId;
+
+ fn table_name(&self) -> &'static str;
+
+ fn schema(&self) -> SchemaRef;
+
+ fn to_stream(&self) -> Result<SendableRecordBatchStream>;
fn table_type(&self) -> TableType {
TableType::Temporary
}
+}
+
+type InformationTableRef = Arc<dyn InformationTable + Send + Sync>;
+
+struct InformationTableDataSource {
+ table: InformationTableRef,
+}
- async fn scan_to_stream(&self, request: ScanRequest) -> TableResult<SendableRecordBatchStream> {
- self.get_stream(request).context(TablesRecordBatchSnafu)
+impl InformationTableDataSource {
+ fn new(table: InformationTableRef) -> Self {
+ Self { table }
}
}
-impl DataSource for InformationTable {
+impl DataSource for InformationTableDataSource {
fn get_stream(
&self,
request: ScanRequest,
@@ -230,22 +151,23 @@ impl DataSource for InformationTable {
let projection = request.projection;
let projected_schema = if let Some(projection) = &projection {
Arc::new(
- self.schema()
+ self.table
+ .schema()
.try_project(projection)
.context(SchemaConversionSnafu)
.map_err(BoxedError::new)?,
)
} else {
- self.schema()
+ self.table.schema()
};
let stream = self
- .stream_builder
+ .table
.to_stream()
.map_err(BoxedError::new)
.context(TablesRecordBatchSnafu)
.map_err(BoxedError::new)?
.map(move |batch| {
- batch.and_then(|batch| {
+ batch.and_then(|batch: common_recordbatch::RecordBatch| {
if let Some(projection) = &projection {
batch.try_project(projection)
} else {
diff --git a/src/catalog/src/information_schema/columns.rs b/src/catalog/src/information_schema/columns.rs
index be66119539f0..53b5efc0f396 100644
--- a/src/catalog/src/information_schema/columns.rs
+++ b/src/catalog/src/information_schema/columns.rs
@@ -16,8 +16,8 @@ use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::{
- INFORMATION_SCHEMA_NAME, SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY,
- SEMANTIC_TYPE_TIME_INDEX,
+ INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME, SEMANTIC_TYPE_FIELD,
+ SEMANTIC_TYPE_PRIMARY_KEY, SEMANTIC_TYPE_TIME_INDEX,
};
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
@@ -31,9 +31,10 @@ use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{StringVectorBuilder, VectorRef};
use snafu::{OptionExt, ResultExt};
+use store_api::storage::TableId;
use super::tables::InformationSchemaTables;
-use super::{InformationStreamBuilder, COLUMNS, TABLES};
+use super::{InformationTable, COLUMNS, TABLES};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
@@ -81,7 +82,15 @@ impl InformationSchemaColumns {
}
}
-impl InformationStreamBuilder for InformationSchemaColumns {
+impl InformationTable for InformationSchemaColumns {
+ fn table_id(&self) -> TableId {
+ INFORMATION_SCHEMA_COLUMNS_TABLE_ID
+ }
+
+ fn table_name(&self) -> &'static str {
+ COLUMNS
+ }
+
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
diff --git a/src/catalog/src/information_schema/tables.rs b/src/catalog/src/information_schema/tables.rs
index 081f2f03cdde..9047aa3e594e 100644
--- a/src/catalog/src/information_schema/tables.rs
+++ b/src/catalog/src/information_schema/tables.rs
@@ -30,13 +30,14 @@ use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder};
use snafu::{OptionExt, ResultExt};
+use store_api::storage::TableId;
use table::metadata::TableType;
use super::{COLUMNS, TABLES};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
-use crate::information_schema::InformationStreamBuilder;
+use crate::information_schema::InformationTable;
use crate::CatalogManager;
pub(super) struct InformationSchemaTables {
@@ -74,7 +75,15 @@ impl InformationSchemaTables {
}
}
-impl InformationStreamBuilder for InformationSchemaTables {
+impl InformationTable for InformationSchemaTables {
+ fn table_id(&self) -> TableId {
+ INFORMATION_SCHEMA_TABLES_TABLE_ID
+ }
+
+ fn table_name(&self) -> &'static str {
+ TABLES
+ }
+
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
diff --git a/src/frontend/src/catalog.rs b/src/frontend/src/catalog.rs
index 548ba73627c7..22157f6665d8 100644
--- a/src/frontend/src/catalog.rs
+++ b/src/frontend/src/catalog.rs
@@ -385,7 +385,7 @@ impl CatalogManager for FrontendCatalogManager {
let provider =
InformationSchemaProvider::new(catalog.to_string(), Arc::downgrade(&manager));
- return provider.table(table_name);
+ return Ok(provider.table(table_name));
}
let key = TableNameKey::new(catalog, schema, table_name);
diff --git a/src/store-api/src/data_source.rs b/src/store-api/src/data_source.rs
index b178fb41afe9..3fbef8e08c88 100644
--- a/src/store-api/src/data_source.rs
+++ b/src/store-api/src/data_source.rs
@@ -26,6 +26,4 @@ pub trait DataSource {
fn get_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream, BoxedError>;
}
-pub type DataSourceRef = Arc<dyn DataSource>;
-
-pub type TableFactory = Arc<dyn Fn() -> DataSourceRef>;
+pub type DataSourceRef = Arc<dyn DataSource + Send + Sync>;
diff --git a/src/table/src/lib.rs b/src/table/src/lib.rs
index fa2fb5d5b1ba..a1e525e5a0a0 100644
--- a/src/table/src/lib.rs
+++ b/src/table/src/lib.rs
@@ -21,6 +21,7 @@ pub mod requests;
pub mod stats;
pub mod table;
pub mod test_util;
+pub mod thin_table;
pub use store_api::storage::RegionStat;
diff --git a/src/table/src/metadata.rs b/src/table/src/metadata.rs
index 07f43162a82e..2892338802ae 100644
--- a/src/table/src/metadata.rs
+++ b/src/table/src/metadata.rs
@@ -34,7 +34,7 @@ pub type TableVersion = u64;
/// Indicates whether and how a filter expression can be handled by a
/// Table for table scans.
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
+#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
pub enum FilterPushDownType {
/// The expression cannot be used by the provider.
Unsupported,
diff --git a/src/table/src/thin_table.rs b/src/table/src/thin_table.rs
new file mode 100644
index 000000000000..a22b06cf4732
--- /dev/null
+++ b/src/table/src/thin_table.rs
@@ -0,0 +1,92 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+
+use async_trait::async_trait;
+use common_query::prelude::Expr;
+use common_recordbatch::SendableRecordBatchStream;
+use datatypes::schema::SchemaRef;
+use snafu::ResultExt;
+use store_api::data_source::DataSourceRef;
+use store_api::storage::ScanRequest;
+
+use crate::error::{Result, TablesRecordBatchSnafu};
+use crate::metadata::{FilterPushDownType, TableInfoRef, TableType};
+use crate::Table;
+
+/// The `ThinTable` struct will replace the `Table` trait.
+/// TODO(zhongzc): After completion, perform renaming and documentation work.
+pub struct ThinTable {
+ schema: SchemaRef,
+ table_info: TableInfoRef,
+ table_type: TableType,
+ filter_pushdown: FilterPushDownType,
+}
+
+impl ThinTable {
+ pub fn new(
+ schema: SchemaRef,
+ table_info: TableInfoRef,
+ table_type: TableType,
+ filter_pushdown: FilterPushDownType,
+ ) -> Self {
+ Self {
+ schema,
+ table_info,
+ table_type,
+ filter_pushdown,
+ }
+ }
+}
+
+pub struct ThinTableAdapter {
+ table: ThinTable,
+ data_source: DataSourceRef,
+}
+
+impl ThinTableAdapter {
+ pub fn new(table: ThinTable, data_source: DataSourceRef) -> Self {
+ Self { table, data_source }
+ }
+}
+
+#[async_trait]
+impl Table for ThinTableAdapter {
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn schema(&self) -> SchemaRef {
+ self.table.schema.clone()
+ }
+
+ fn table_info(&self) -> TableInfoRef {
+ self.table.table_info.clone()
+ }
+
+ fn table_type(&self) -> TableType {
+ self.table.table_type
+ }
+
+ async fn scan_to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
+ self.data_source
+ .get_stream(request)
+ .context(TablesRecordBatchSnafu)
+ }
+
+ fn supports_filters_pushdown(&self, filters: &[&Expr]) -> Result<Vec<FilterPushDownType>> {
+ Ok(vec![self.table.filter_pushdown; filters.len()])
+ }
+}
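`ThinTable` holds only static metadata (schema, table info, table type, filter pushdown behaviour), while `ThinTableAdapter` forwards every scan to a `DataSourceRef`. A schematic, self-contained sketch of that split, using placeholder types (`Rows`, `MetaOnlyTable`, `Adapter`, `MockSource`) rather than the real `table`/`store_api` APIs:

```rust
use std::sync::Arc;

type Rows = Vec<Vec<String>>;

trait DataSource: Send + Sync {
    fn get_stream(&self) -> Rows;
}

struct MetaOnlyTable {
    name: &'static str,
    columns: Vec<&'static str>,
}

struct Adapter {
    meta: MetaOnlyTable,
    source: Arc<dyn DataSource>,
}

impl Adapter {
    // Scanning only forwards to the data source; the adapter itself holds
    // nothing but metadata, mirroring ThinTableAdapter::scan_to_stream.
    fn scan(&self) -> Rows {
        self.source.get_stream()
    }
}

struct MockSource;

impl DataSource for MockSource {
    fn get_stream(&self) -> Rows {
        vec![vec!["greptime".into(), "public".into(), "numbers".into()]]
    }
}

fn main() {
    let adapter = Adapter {
        meta: MetaOnlyTable {
            name: "tables",
            columns: vec!["table_catalog", "table_schema", "table_name"],
        },
        source: Arc::new(MockSource),
    };
    println!("{} columns: {:?}", adapter.meta.name, adapter.meta.columns);
    println!("rows: {:?}", adapter.scan());
}
```

With this shape, each information-schema table only has to describe itself and produce rows; the `Table`-trait plumbing lives once, in the adapter.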
|
refactor
|
add `ThinTable` to proxy tables from infoschema (#2193)
|
fcff66e03904d80aacb91b8edd4e15240161d264
|
2023-06-27 13:03:53
|
LFC
|
chore: deny unused results (#1825)
| false
|
diff --git a/.cargo/config.toml b/.cargo/config.toml
index bf125a3eda47..77736304c643 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -13,4 +13,5 @@ rustflags = [
"-Wclippy::print_stderr",
"-Wclippy::implicit_clone",
"-Aclippy::items_after_test_module",
+ "-Wunused_results",
]
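`unused_results` makes rustc flag any statement whose non-unit result is silently dropped, which is why the rest of this diff binds such values to `_`. A standalone illustration; the crate-level `deny` here turns the violation into a hard error, whereas the `-W` rustflag above only warns:

```rust
// The rustflag enables this lint workspace-wide; denying it locally makes the
// failure visible in a single file.
#![deny(unused_results)]

use std::collections::HashMap;

fn main() {
    let mut map = HashMap::new();

    // map.insert("a", 1);          // rejected here: the returned `Option<i32>` is dropped
    let _ = map.insert("a", 1); // explicitly discard the previous value
    let previous = map.insert("a", 2); // or bind it when it matters
    assert_eq!(previous, Some(1));

    println!("{map:?}");
}
```

For `HashMap::insert` the discarded value is the previous entry, so `let _ =` documents that ignoring it is intentional.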
diff --git a/benchmarks/src/bin/nyc-taxi.rs b/benchmarks/src/bin/nyc-taxi.rs
index 9bc6a5fd2e15..424bdcd7d635 100644
--- a/benchmarks/src/bin/nyc-taxi.rs
+++ b/benchmarks/src/bin/nyc-taxi.rs
@@ -114,7 +114,7 @@ async fn write_data(
};
let now = Instant::now();
- db.insert(requests).await.unwrap();
+ let _ = db.insert(requests).await.unwrap();
let elapsed = now.elapsed();
total_rpc_elapsed_ms += elapsed.as_millis();
progress_bar.inc(row_count as _);
@@ -377,19 +377,16 @@ fn create_table_expr() -> CreateTableExpr {
}
fn query_set() -> HashMap<String, String> {
- let mut ret = HashMap::new();
-
- ret.insert(
- "count_all".to_string(),
- format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
- );
-
- ret.insert(
- "fare_amt_by_passenger".to_string(),
- format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count")
- );
-
- ret
+ HashMap::from([
+ (
+ "count_all".to_string(),
+ format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
+ ),
+ (
+ "fare_amt_by_passenger".to_string(),
+ format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count"),
+ )
+ ])
}
async fn do_write(args: &Args, db: &Database) {
@@ -414,7 +411,8 @@ async fn do_write(args: &Args, db: &Database) {
let db = db.clone();
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
- write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
+ let _ = write_jobs
+ .spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
}
}
while write_jobs.join_next().await.is_some() {
@@ -423,7 +421,8 @@ async fn do_write(args: &Args, db: &Database) {
let db = db.clone();
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
- write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
+ let _ = write_jobs
+ .spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
}
}
}
diff --git a/src/catalog/src/helper.rs b/src/catalog/src/helper.rs
index dfad2b76d63e..fc7e263b033c 100644
--- a/src/catalog/src/helper.rs
+++ b/src/catalog/src/helper.rs
@@ -392,6 +392,6 @@ mod tests {
#[test]
fn test_table_global_value_compatibility() {
let s = r#"{"node_id":1,"regions_id_map":{"1":[0]},"table_info":{"ident":{"table_id":1098,"version":1},"name":"container_cpu_limit","desc":"Created on insertion","catalog_name":"greptime","schema_name":"dd","meta":{"schema":{"column_schemas":[{"name":"container_id","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"container_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"docker_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"host","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_tag","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"interval","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"runtime","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"short_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"type","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"dd_value","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}},{"name":"git.repository_url","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}}],"timestamp_index":11,"version":1},"primary_key_indices":[0,1,2,3,4,5,6,7,8,9,12],"value_indices":[10,11],"engine":"mito","next_column_id":12,"region_numbers":[],"engine_options":{},"options":{},"created_on":"1970-01-01T00:00:00Z"},"table_type":"Base"}}"#;
- TableGlobalValue::parse(s).unwrap();
+ assert!(TableGlobalValue::parse(s).is_ok());
}
}
diff --git a/src/catalog/src/lib.rs b/src/catalog/src/lib.rs
index 54f69b5d41c9..926674003f1e 100644
--- a/src/catalog/src/lib.rs
+++ b/src/catalog/src/lib.rs
@@ -180,7 +180,7 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
table_name,
),
})?;
- manager
+ let _ = manager
.register_table(RegisterTableRequest {
catalog: catalog_name.clone(),
schema: schema_name.clone(),
diff --git a/src/catalog/src/local/manager.rs b/src/catalog/src/local/manager.rs
index a53c1138fb76..188ea4d0d892 100644
--- a/src/catalog/src/local/manager.rs
+++ b/src/catalog/src/local/manager.rs
@@ -118,9 +118,10 @@ impl LocalCatalogManager {
async fn init_system_catalog(&self) -> Result<()> {
// register SystemCatalogTable
- self.catalogs
+ let _ = self
+ .catalogs
.register_catalog_sync(SYSTEM_CATALOG_NAME.to_string())?;
- self.catalogs.register_schema_sync(RegisterSchemaRequest {
+ let _ = self.catalogs.register_schema_sync(RegisterSchemaRequest {
catalog: SYSTEM_CATALOG_NAME.to_string(),
schema: INFORMATION_SCHEMA_NAME.to_string(),
})?;
@@ -131,12 +132,13 @@ impl LocalCatalogManager {
table_id: SYSTEM_CATALOG_TABLE_ID,
table: self.system.information_schema.system.clone(),
};
- self.catalogs.register_table(register_table_req).await?;
+ let _ = self.catalogs.register_table(register_table_req).await?;
// register default catalog and default schema
- self.catalogs
+ let _ = self
+ .catalogs
.register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())?;
- self.catalogs.register_schema_sync(RegisterSchemaRequest {
+ let _ = self.catalogs.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
})?;
@@ -151,7 +153,8 @@ impl LocalCatalogManager {
table: numbers_table,
};
- self.catalogs
+ let _ = self
+ .catalogs
.register_table(register_number_table_req)
.await?;
@@ -226,7 +229,8 @@ impl LocalCatalogManager {
for entry in entries {
match entry {
Entry::Catalog(c) => {
- self.catalogs
+ let _ = self
+ .catalogs
.register_catalog_if_absent(c.catalog_name.clone());
info!("Register catalog: {}", c.catalog_name);
}
@@ -235,7 +239,7 @@ impl LocalCatalogManager {
catalog: s.catalog_name.clone(),
schema: s.schema_name.clone(),
};
- self.catalogs.register_schema_sync(req)?;
+ let _ = self.catalogs.register_schema_sync(req)?;
info!("Registered schema: {:?}", s);
}
Entry::Table(t) => {
@@ -297,7 +301,7 @@ impl LocalCatalogManager {
table_id: t.table_id,
table: table_ref,
};
- self.catalogs.register_table(register_request).await?;
+ let _ = self.catalogs.register_table(register_request).await?;
Ok(())
}
@@ -389,8 +393,9 @@ impl CatalogManager for LocalCatalogManager {
let engine = request.table.table_info().meta.engine.to_string();
let table_name = request.table_name.clone();
let table_id = request.table_id;
- self.catalogs.register_table(request).await?;
- self.system
+ let _ = self.catalogs.register_table(request).await?;
+ let _ = self
+ .system
.register_table(
catalog_name.clone(),
schema_name.clone(),
@@ -438,7 +443,8 @@ impl CatalogManager for LocalCatalogManager {
let engine = old_table.table_info().meta.engine.to_string();
// rename table in system catalog
- self.system
+ let _ = self
+ .system
.register_table(
catalog_name.clone(),
schema_name.clone(),
@@ -499,7 +505,8 @@ impl CatalogManager for LocalCatalogManager {
schema: schema_name,
}
);
- self.system
+ let _ = self
+ .system
.register_schema(request.catalog.clone(), schema_name.clone())
.await?;
self.catalogs.register_schema_sync(request)
diff --git a/src/catalog/src/local/memory.rs b/src/catalog/src/local/memory.rs
index 799d93a71f2b..a1334e06e86f 100644
--- a/src/catalog/src/local/memory.rs
+++ b/src/catalog/src/local/memory.rs
@@ -49,9 +49,8 @@ impl Default for MemoryCatalogManager {
catalogs: Default::default(),
};
- let mut catalog = HashMap::with_capacity(1);
- catalog.insert(DEFAULT_SCHEMA_NAME.to_string(), HashMap::new());
- manager
+ let catalog = HashMap::from([(DEFAULT_SCHEMA_NAME.to_string(), HashMap::new())]);
+ let _ = manager
.catalogs
.write()
.unwrap()
@@ -115,7 +114,7 @@ impl CatalogManager for MemoryCatalogManager {
}
let table = schema.remove(&request.table_name).unwrap();
- schema.insert(request.new_table_name, table);
+ let _ = schema.insert(request.new_table_name, table);
Ok(true)
}
@@ -144,9 +143,11 @@ impl CatalogManager for MemoryCatalogManager {
}
async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
- self.register_schema_sync(request)?;
- increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
- Ok(true)
+ let registered = self.register_schema_sync(request)?;
+ if registered {
+ increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
+ }
+ Ok(registered)
}
async fn register_system_table(&self, _request: RegisterSystemTableRequest) -> Result<()> {
@@ -234,9 +235,11 @@ impl CatalogManager for MemoryCatalogManager {
}
async fn register_catalog(&self, name: String) -> Result<bool> {
- self.register_catalog_sync(name)?;
- increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
- Ok(true)
+ let registered = self.register_catalog_sync(name)?;
+ if registered {
+ increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
+ }
+ Ok(registered)
}
fn as_any(&self) -> &dyn Any {
@@ -252,7 +255,7 @@ impl MemoryCatalogManager {
match entry {
Entry::Occupied(_) => true,
Entry::Vacant(v) => {
- v.insert(HashMap::new());
+ let _ = v.insert(HashMap::new());
false
}
}
@@ -273,7 +276,7 @@ impl MemoryCatalogManager {
if catalog.contains_key(&request.schema) {
return Ok(false);
}
- catalog.insert(request.schema, HashMap::new());
+ let _ = catalog.insert(request.schema, HashMap::new());
Ok(true)
}
@@ -310,7 +313,7 @@ impl MemoryCatalogManager {
table_id: table.table_info().ident.table_id,
table,
};
- manager.register_table_sync(request).unwrap();
+ let _ = manager.register_table_sync(request).unwrap();
manager
}
}
@@ -341,7 +344,7 @@ mod tests {
table: Arc::new(NumbersTable::default()),
};
- catalog_list.register_table(register_request).await.unwrap();
+ assert!(catalog_list.register_table(register_request).await.is_ok());
let table = catalog_list
.table(
DEFAULT_CATALOG_NAME,
@@ -390,7 +393,7 @@ mod tests {
new_table_name: new_table_name.to_string(),
table_id,
};
- catalog.rename_table(rename_request).await.unwrap();
+ assert!(catalog.rename_table(rename_request).await.is_ok());
// test old table name not exist
assert!(!catalog
@@ -492,7 +495,7 @@ mod tests {
table_id: 2333,
table: Arc::new(NumbersTable::default()),
};
- catalog.register_table(register_table_req).await.unwrap();
+ assert!(catalog.register_table(register_table_req).await.is_ok());
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await
diff --git a/src/catalog/src/remote/client.rs b/src/catalog/src/remote/client.rs
index 6df2fcd2616d..66e470932333 100644
--- a/src/catalog/src/remote/client.rs
+++ b/src/catalog/src/remote/client.rs
@@ -240,7 +240,7 @@ impl KvBackend for MetaKvBackend {
async fn move_value(&self, from_key: &[u8], to_key: &[u8]) -> Result<()> {
let req = MoveValueRequest::new(from_key, to_key);
- self.client.move_value(req).await.context(MetaSrvSnafu)?;
+ let _ = self.client.move_value(req).await.context(MetaSrvSnafu)?;
Ok(())
}
diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs
index e6cc824f0811..127be693e8cf 100644
--- a/src/catalog/src/remote/manager.rs
+++ b/src/catalog/src/remote/manager.rs
@@ -112,7 +112,7 @@ impl RemoteCatalogManager {
joins.push(self.initiate_schemas(node_id, backend, engine_manager, catalog_name));
}
- futures::future::try_join_all(joins).await?;
+ let _ = futures::future::try_join_all(joins).await?;
Ok(())
}
@@ -623,13 +623,14 @@ impl CatalogManager for RemoteCatalogManager {
self.check_catalog_schema_exist(&catalog_name, &schema_name)
.await?;
- self.register_table(
- catalog_name.clone(),
- schema_name.clone(),
- request.table_name,
- request.table.clone(),
- )
- .await?;
+ let _ = self
+ .register_table(
+ catalog_name.clone(),
+ schema_name.clone(),
+ request.table_name,
+ request.table.clone(),
+ )
+ .await?;
let table_info = request.table.table_info();
let table_ident = TableIdent {
@@ -680,7 +681,8 @@ impl CatalogManager for RemoteCatalogManager {
table_id: table_info.ident.table_id,
engine: table_info.meta.engine.clone(),
};
- self.region_alive_keepers
+ let _ = self
+ .region_alive_keepers
.deregister_table(&table_ident)
.await;
}
@@ -846,7 +848,7 @@ impl CatalogManager for RemoteCatalogManager {
let catalog_key = String::from_utf8_lossy(&catalog.0);
if let Ok(key) = CatalogKey::parse(&catalog_key) {
- catalogs.insert(key.catalog_name);
+ let _ = catalogs.insert(key.catalog_name);
}
}
}
@@ -865,7 +867,7 @@ impl CatalogManager for RemoteCatalogManager {
let schema_key = String::from_utf8_lossy(&schema.0);
if let Ok(key) = SchemaKey::parse(&schema_key) {
- schemas.insert(key.schema_name);
+ let _ = schemas.insert(key.schema_name);
}
}
}
@@ -886,7 +888,7 @@ impl CatalogManager for RemoteCatalogManager {
let table_key = String::from_utf8_lossy(&table.0);
if let Ok(key) = TableRegionalKey::parse(&table_key) {
- tables.insert(key.table_name);
+ let _ = tables.insert(key.table_name);
}
}
}
diff --git a/src/catalog/src/remote/mock.rs b/src/catalog/src/remote/mock.rs
index c23e1fa757b6..248ee4a430d6 100644
--- a/src/catalog/src/remote/mock.rs
+++ b/src/catalog/src/remote/mock.rs
@@ -45,7 +45,6 @@ pub struct MockKvBackend {
impl Default for MockKvBackend {
fn default() -> Self {
- let mut map = BTreeMap::default();
let catalog_value = CatalogValue {}.as_bytes().unwrap();
let schema_value = SchemaValue {}.as_bytes().unwrap();
@@ -60,11 +59,11 @@ impl Default for MockKvBackend {
}
.to_string();
- // create default catalog and schema
- map.insert(default_catalog_key.into(), catalog_value);
- map.insert(default_schema_key.into(), schema_value);
-
- let map = RwLock::new(map);
+ let map = RwLock::new(BTreeMap::from([
+ // create default catalog and schema
+ (default_catalog_key.into(), catalog_value),
+ (default_schema_key.into(), schema_value),
+ ]));
Self { map }
}
}
@@ -109,7 +108,7 @@ impl KvBackend for MockKvBackend {
async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error> {
let mut map = self.map.write().await;
- map.insert(key.to_vec(), val.to_vec());
+ let _ = map.insert(key.to_vec(), val.to_vec());
Ok(())
}
@@ -124,7 +123,7 @@ impl KvBackend for MockKvBackend {
match existing {
Entry::Vacant(e) => {
if expect.is_empty() {
- e.insert(val.to_vec());
+ let _ = e.insert(val.to_vec());
Ok(Ok(()))
} else {
Ok(Err(None))
@@ -132,7 +131,7 @@ impl KvBackend for MockKvBackend {
}
Entry::Occupied(mut existing) => {
if existing.get() == expect {
- existing.insert(val.to_vec());
+ let _ = existing.insert(val.to_vec());
Ok(Ok(()))
} else {
Ok(Err(Some(existing.get().clone())))
@@ -201,7 +200,7 @@ impl TableEngine for MockTableEngine {
)) as Arc<_>;
let mut tables = self.tables.write().unwrap();
- tables.insert(table_id, table.clone() as TableRef);
+ let _ = tables.insert(table_id, table.clone() as TableRef);
Ok(table)
}
diff --git a/src/catalog/src/remote/region_alive_keeper.rs b/src/catalog/src/remote/region_alive_keeper.rs
index 130b7536fd6f..be372732fb34 100644
--- a/src/catalog/src/remote/region_alive_keeper.rs
+++ b/src/catalog/src/remote/region_alive_keeper.rs
@@ -92,7 +92,7 @@ impl RegionAliveKeepers {
}
let mut keepers = self.keepers.lock().await;
- keepers.insert(table_ident.clone(), keeper.clone());
+ let _ = keepers.insert(table_ident.clone(), keeper.clone());
if self.started.load(Ordering::Relaxed) {
keeper.start().await;
@@ -237,7 +237,7 @@ impl RegionAliveKeeper {
let countdown_task_handles = Arc::downgrade(&self.countdown_task_handles);
let on_task_finished = async move {
if let Some(x) = countdown_task_handles.upgrade() {
-                x.lock().await.remove(&region);
+                let _ = x.lock().await.remove(&region);
} // Else the countdown task handles map could be dropped because the keeper is dropped.
};
let handle = Arc::new(CountdownTaskHandle::new(
@@ -248,7 +248,7 @@ impl RegionAliveKeeper {
));
let mut handles = self.countdown_task_handles.lock().await;
- handles.insert(region, handle.clone());
+ let _ = handles.insert(region, handle.clone());
if self.started.load(Ordering::Relaxed) {
handle.start(self.heartbeat_interval_millis).await;
@@ -772,7 +772,7 @@ mod test {
};
let table_engine = Arc::new(MockTableEngine::default());
- table_engine.create_table(ctx, request).await.unwrap();
+ assert!(table_engine.create_table(ctx, request).await.is_ok());
let table_ident = TableIdent {
catalog: catalog.to_string(),
@@ -788,7 +788,7 @@ mod test {
region: 1,
rx,
};
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
task.run().await;
});
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index 854602b54fff..3eaa2baddbd4 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -228,21 +228,21 @@ pub(crate) fn build_table_deletion_request(
}
fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<String, VectorRef> {
- let mut m = HashMap::with_capacity(3);
- m.insert(
- "entry_type".to_string(),
- Arc::new(UInt8Vector::from_slice([entry_type as u8])) as _,
- );
- m.insert(
- "key".to_string(),
- Arc::new(BinaryVector::from_slice(&[key])) as _,
- );
- // Timestamp in key part is intentionally left to 0
- m.insert(
- "timestamp".to_string(),
- Arc::new(TimestampMillisecondVector::from_slice([0])) as _,
- );
- m
+ HashMap::from([
+ (
+ "entry_type".to_string(),
+ Arc::new(UInt8Vector::from_slice([entry_type as u8])) as VectorRef,
+ ),
+ (
+ "key".to_string(),
+ Arc::new(BinaryVector::from_slice(&[key])) as VectorRef,
+ ),
+ (
+ "timestamp".to_string(),
+ // Timestamp in key part is intentionally left to 0
+ Arc::new(TimestampMillisecondVector::from_slice([0])) as VectorRef,
+ ),
+ ])
}
pub fn build_schema_insert_request(catalog_name: String, schema_name: String) -> InsertRequest {
@@ -262,18 +262,18 @@ pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) ->
let mut columns_values = HashMap::with_capacity(6);
columns_values.extend(primary_key_columns.into_iter());
- columns_values.insert(
+ let _ = columns_values.insert(
"value".to_string(),
Arc::new(BinaryVector::from_slice(&[value])) as _,
);
let now = util::current_time_millis();
- columns_values.insert(
+ let _ = columns_values.insert(
"gmt_created".to_string(),
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
);
- columns_values.insert(
+ let _ = columns_values.insert(
"gmt_modified".to_string(),
Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
);
@@ -482,14 +482,13 @@ mod tests {
}
#[test]
- #[should_panic]
pub fn test_decode_mismatch() {
- decode_system_catalog(
+ assert!(decode_system_catalog(
Some(EntryType::Table as u8),
Some("some_catalog.some_schema.42".as_bytes()),
None,
)
- .unwrap();
+ .is_err());
}
#[test]
@@ -504,7 +503,7 @@ mod tests {
let dir = create_temp_dir("system-table-test");
let store_dir = dir.path().to_string_lossy();
let mut builder = object_store::services::Fs::default();
- builder.root(&store_dir);
+ let _ = builder.root(&store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let table_engine = Arc::new(MitoEngine::new(
diff --git a/src/catalog/src/table_source.rs b/src/catalog/src/table_source.rs
index d5b3423c8126..fc6882015ce5 100644
--- a/src/catalog/src/table_source.rs
+++ b/src/catalog/src/table_source.rs
@@ -111,7 +111,7 @@ impl DfTableSourceProvider {
let provider = DfTableProviderAdapter::new(table);
let source = provider_as_source(Arc::new(provider));
- self.resolved_tables.insert(resolved_name, source.clone());
+ let _ = self.resolved_tables.insert(resolved_name, source.clone());
Ok(source)
}
}
diff --git a/src/catalog/tests/remote_catalog_tests.rs b/src/catalog/tests/remote_catalog_tests.rs
index 983b18ebd7fd..cafecf81c352 100644
--- a/src/catalog/tests/remote_catalog_tests.rs
+++ b/src/catalog/tests/remote_catalog_tests.rs
@@ -82,7 +82,7 @@ mod tests {
let mut res = HashSet::new();
while let Some(r) = iter.next().await {
let kv = r.unwrap();
- res.insert(String::from_utf8_lossy(&kv.0).to_string());
+ let _ = res.insert(String::from_utf8_lossy(&kv.0).to_string());
}
assert_eq!(
vec!["__c-greptime".to_string()],
@@ -305,11 +305,11 @@ mod tests {
let schema_name = "nonexistent_schema".to_string();
// register catalog to catalog manager
- components
+ assert!(components
.catalog_manager
.register_catalog(catalog_name.clone())
.await
- .unwrap();
+ .is_ok());
assert_eq!(
HashSet::<String>::from_iter(
vec![DEFAULT_CATALOG_NAME.to_string(), catalog_name.clone()].into_iter()
diff --git a/src/client/src/client.rs b/src/client/src/client.rs
index 30862a1f53ac..f5a686cc02b2 100644
--- a/src/client/src/client.rs
+++ b/src/client/src/client.rs
@@ -165,7 +165,7 @@ impl Client {
pub async fn health_check(&self) -> Result<()> {
let (_, channel) = self.find_channel()?;
let mut client = HealthCheckClient::new(channel);
- client.health_check(HealthCheckRequest {}).await?;
+ let _ = client.health_check(HealthCheckRequest {}).await?;
Ok(())
}
}
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index 524c406abbf7..b3688fb9d1c5 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -173,7 +173,7 @@ impl Database {
let mut client = self.client.make_database_client()?.inner;
let (sender, receiver) = mpsc::channel::<GreptimeRequest>(65536);
let receiver = ReceiverStream::new(receiver);
- client.handle_requests(receiver).await?;
+ let _ = client.handle_requests(receiver).await?;
Ok(sender)
}
diff --git a/src/client/src/load_balance.rs b/src/client/src/load_balance.rs
index d2837883715a..3543db5a1950 100644
--- a/src/client/src/load_balance.rs
+++ b/src/client/src/load_balance.rs
@@ -60,7 +60,7 @@ mod tests {
let random = Random;
for _ in 0..100 {
let peer = random.get_peer(&peers).unwrap();
- all.contains(peer);
+ assert!(all.contains(peer));
}
}
}
diff --git a/src/cmd/src/cli/repl.rs b/src/cmd/src/cli/repl.rs
index 56cea0dd64e5..e5c4cb5911ea 100644
--- a/src/cmd/src/cli/repl.rs
+++ b/src/cmd/src/cli/repl.rs
@@ -108,7 +108,7 @@ impl Repl {
Ok(ref line) => {
let request = line.trim();
- self.rl.add_history_entry(request.to_string());
+ let _ = self.rl.add_history_entry(request.to_string());
request.try_into()
}
@@ -137,7 +137,7 @@ impl Repl {
}
}
ReplCommand::Sql { sql } => {
- self.execute_sql(sql).await;
+ let _ = self.execute_sql(sql).await;
}
ReplCommand::Exit => {
return Ok(());
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index b3518d9627c8..bd0442357808 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -326,12 +326,12 @@ mod tests {
.is_err());
// Providing node_id but leave metasrv_addr absent is ok since metasrv_addr has default value
- (StartCommand {
+ assert!((StartCommand {
node_id: Some(42),
..Default::default()
})
.load_options(TopLevelOptions::default())
- .unwrap();
+ .is_ok());
}
#[test]
diff --git a/src/cmd/tests/cli.rs b/src/cmd/tests/cli.rs
index 07ad1123cbc0..0176846f02cd 100644
--- a/src/cmd/tests/cli.rs
+++ b/src/cmd/tests/cli.rs
@@ -27,10 +27,10 @@ mod tests {
impl Repl {
fn send_line(&mut self, line: &str) {
- self.repl.send_line(line).unwrap();
+ assert!(self.repl.send_line(line).is_ok());
// read a line to consume the prompt
- self.read_line();
+ let _ = self.read_line();
}
fn read_line(&mut self) -> String {
@@ -76,7 +76,7 @@ mod tests {
std::thread::sleep(Duration::from_secs(3));
let mut repl_cmd = Command::new("./greptime");
- repl_cmd.current_dir(bin_path).args([
+ let _ = repl_cmd.current_dir(bin_path).args([
"--log-level=off",
"cli",
"attach",
@@ -105,7 +105,7 @@ mod tests {
test_select(repl);
datanode.kill().unwrap();
- datanode.wait().unwrap();
+ assert!(datanode.wait().is_ok());
}
fn test_create_database(repl: &mut Repl) {
diff --git a/src/common/base/src/lib.rs b/src/common/base/src/lib.rs
index c76e6b881bb8..5552a2ebf9e6 100644
--- a/src/common/base/src/lib.rs
+++ b/src/common/base/src/lib.rs
@@ -41,7 +41,7 @@ impl Plugins {
}
pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
- self.lock().insert(value);
+ let _ = self.lock().insert(value);
}
pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
diff --git a/src/common/datasource/src/file_format.rs b/src/common/datasource/src/file_format.rs
index 6cd0c7861516..b74e2b155836 100644
--- a/src/common/datasource/src/file_format.rs
+++ b/src/common/datasource/src/file_format.rs
@@ -213,7 +213,7 @@ pub async fn stream_to_file<T: DfRecordBatchEncoder, U: Fn(SharedBuffer) -> T>(
}
// Flushes all pending writes
- writer.try_flush(true).await?;
+ let _ = writer.try_flush(true).await?;
writer.close_inner_writer().await?;
Ok(rows)
diff --git a/src/common/datasource/src/file_format/csv.rs b/src/common/datasource/src/file_format/csv.rs
index b723ce9ddc1b..dfd2f3199af8 100644
--- a/src/common/datasource/src/file_format/csv.rs
+++ b/src/common/datasource/src/file_format/csv.rs
@@ -291,20 +291,20 @@ mod tests {
#[test]
fn test_try_from() {
- let mut map = HashMap::new();
+ let map = HashMap::new();
let format: CsvFormat = CsvFormat::try_from(&map).unwrap();
assert_eq!(format, CsvFormat::default());
- map.insert(
- FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
- "2000".to_string(),
- );
-
- map.insert(FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string());
- map.insert(FORMAT_DELIMITER.to_string(), b'\t'.to_string());
- map.insert(FORMAT_HAS_HEADER.to_string(), "false".to_string());
-
+ let map = HashMap::from([
+ (
+ FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
+ "2000".to_string(),
+ ),
+ (FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string()),
+ (FORMAT_DELIMITER.to_string(), b'\t'.to_string()),
+ (FORMAT_HAS_HEADER.to_string(), "false".to_string()),
+ ]);
let format = CsvFormat::try_from(&map).unwrap();
assert_eq!(
diff --git a/src/common/datasource/src/file_format/json.rs b/src/common/datasource/src/file_format/json.rs
index b9cf6e31a93a..9a13cc1cf1bb 100644
--- a/src/common/datasource/src/file_format/json.rs
+++ b/src/common/datasource/src/file_format/json.rs
@@ -214,18 +214,18 @@ mod tests {
#[test]
fn test_try_from() {
- let mut map = HashMap::new();
+ let map = HashMap::new();
let format = JsonFormat::try_from(&map).unwrap();
assert_eq!(format, JsonFormat::default());
- map.insert(
- FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
- "2000".to_string(),
- );
-
- map.insert(FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string());
-
+ let map = HashMap::from([
+ (
+ FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
+ "2000".to_string(),
+ ),
+ (FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string()),
+ ]);
let format = JsonFormat::try_from(&map).unwrap();
assert_eq!(
diff --git a/src/common/datasource/src/object_store/fs.rs b/src/common/datasource/src/object_store/fs.rs
index 78a481b2948b..7f43c50591dc 100644
--- a/src/common/datasource/src/object_store/fs.rs
+++ b/src/common/datasource/src/object_store/fs.rs
@@ -20,7 +20,7 @@ use crate::error::{BuildBackendSnafu, Result};
pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
let mut builder = Fs::default();
- builder.root(root);
+ let _ = builder.root(root);
let object_store = ObjectStore::new(builder)
.context(BuildBackendSnafu)?
.finish();
diff --git a/src/common/datasource/src/object_store/s3.rs b/src/common/datasource/src/object_store/s3.rs
index 0ebd80411b21..f1c39dbe05a7 100644
--- a/src/common/datasource/src/object_store/s3.rs
+++ b/src/common/datasource/src/object_store/s3.rs
@@ -34,28 +34,26 @@ pub fn build_s3_backend(
) -> Result<ObjectStore> {
let mut builder = S3::default();
- builder.root(path);
-
- builder.bucket(host);
+ let _ = builder.root(path).bucket(host);
if let Some(endpoint) = connection.get(ENDPOINT_URL) {
- builder.endpoint(endpoint);
+ let _ = builder.endpoint(endpoint);
}
if let Some(region) = connection.get(REGION) {
- builder.region(region);
+ let _ = builder.region(region);
}
if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
- builder.access_key_id(key_id);
+ let _ = builder.access_key_id(key_id);
}
if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
- builder.secret_access_key(key);
+ let _ = builder.secret_access_key(key);
}
if let Some(session_token) = connection.get(SESSION_TOKEN) {
- builder.security_token(session_token);
+ let _ = builder.security_token(session_token);
}
if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
@@ -69,7 +67,7 @@ pub fn build_s3_backend(
.build()
})?;
if enable {
- builder.enable_virtual_host_style();
+ let _ = builder.enable_virtual_host_style();
}
}
diff --git a/src/common/datasource/src/test_util.rs b/src/common/datasource/src/test_util.rs
index ab04017f1644..0117f54087a5 100644
--- a/src/common/datasource/src/test_util.rs
+++ b/src/common/datasource/src/test_util.rs
@@ -55,7 +55,7 @@ pub fn format_schema(schema: Schema) -> Vec<String> {
pub fn test_store(root: &str) -> ObjectStore {
let mut builder = Fs::default();
- builder.root(root);
+ let _ = builder.root(root);
ObjectStore::new(builder).unwrap().finish()
}
@@ -64,7 +64,7 @@ pub fn test_tmp_store(root: &str) -> (ObjectStore, TempDir) {
let dir = create_temp_dir(root);
let mut builder = Fs::default();
- builder.root("/");
+ let _ = builder.root("/");
(ObjectStore::new(builder).unwrap().finish(), dir)
}
@@ -113,14 +113,14 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi
let output_path = format!("{}/{}", dir.path().display(), "output");
- stream_to_json(
+ assert!(stream_to_json(
Box::pin(stream),
tmp_store.clone(),
&output_path,
threshold(size),
)
.await
- .unwrap();
+ .is_ok());
let written = tmp_store.read(&output_path).await.unwrap();
let origin = store.read(origin_path).await.unwrap();
@@ -155,14 +155,14 @@ pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usiz
let output_path = format!("{}/{}", dir.path().display(), "output");
- stream_to_csv(
+ assert!(stream_to_csv(
Box::pin(stream),
tmp_store.clone(),
&output_path,
threshold(size),
)
.await
- .unwrap();
+ .is_ok());
let written = tmp_store.read(&output_path).await.unwrap();
let origin = store.read(origin_path).await.unwrap();
diff --git a/src/common/function-macro/tests/test_derive.rs b/src/common/function-macro/tests/test_derive.rs
index 253a3ae3e41d..db2b469e9b36 100644
--- a/src/common/function-macro/tests/test_derive.rs
+++ b/src/common/function-macro/tests/test_derive.rs
@@ -22,7 +22,7 @@ struct Foo {}
#[test]
#[allow(clippy::extra_unused_type_parameters)]
fn test_derive() {
- Foo::default();
+ let _ = Foo::default();
assert_fields!(Foo: input_types);
assert_impl_all!(Foo: std::fmt::Debug, Default, AggrFuncTypeStore);
}
diff --git a/src/common/function/src/scalars/function_registry.rs b/src/common/function/src/scalars/function_registry.rs
index d25341c7ab23..0e8c1c4b1943 100644
--- a/src/common/function/src/scalars/function_registry.rs
+++ b/src/common/function/src/scalars/function_registry.rs
@@ -32,14 +32,16 @@ pub struct FunctionRegistry {
impl FunctionRegistry {
pub fn register(&self, func: FunctionRef) {
- self.functions
+ let _ = self
+ .functions
.write()
.unwrap()
.insert(func.name().to_string(), func);
}
pub fn register_aggregate_function(&self, func: AggregateFunctionMetaRef) {
- self.aggregate_functions
+ let _ = self
+ .aggregate_functions
.write()
.unwrap()
.insert(func.name(), func);
diff --git a/src/common/grpc-expr/src/insert.rs b/src/common/grpc-expr/src/insert.rs
index c98d3a2182e9..d96492185674 100644
--- a/src/common/grpc-expr/src/insert.rs
+++ b/src/common/grpc-expr/src/insert.rs
@@ -77,7 +77,7 @@ pub fn find_new_columns(schema: &SchemaRef, columns: &[Column]) -> Result<Option
is_key: *semantic_type == TAG_SEMANTIC_TYPE,
location: None,
});
- new_columns.insert(column_name.to_string());
+ let _ = new_columns.insert(column_name.to_string());
}
}
@@ -239,7 +239,7 @@ pub fn build_create_expr_from_insertion(
let column_def = build_column_def(column_name, *datatype, is_nullable);
column_defs.push(column_def);
- new_columns.insert(column_name.to_string());
+ let _ = new_columns.insert(column_name.to_string());
}
}
diff --git a/src/common/grpc/benches/channel_manager.rs b/src/common/grpc/benches/channel_manager.rs
index 3ba2269b2fc2..db937a940722 100644
--- a/src/common/grpc/benches/channel_manager.rs
+++ b/src/common/grpc/benches/channel_manager.rs
@@ -39,7 +39,7 @@ async fn do_bench_channel_manager() {
}
fn bench_channel_manager(c: &mut Criterion) {
- c.bench_function("bench channel manager", |b| {
+ let _ = c.bench_function("bench channel manager", |b| {
b.iter(do_bench_channel_manager);
});
}
diff --git a/src/common/grpc/src/channel_manager.rs b/src/common/grpc/src/channel_manager.rs
index 4d80a0464984..dc23f13f15b1 100644
--- a/src/common/grpc/src/channel_manager.rs
+++ b/src/common/grpc/src/channel_manager.rs
@@ -66,7 +66,7 @@ impl ChannelManager {
}
let pool = self.pool.clone();
- common_runtime::spawn_bg(async {
+ let _handle = common_runtime::spawn_bg(async {
recycle_channel_in_loop(pool, RECYCLE_CHANNEL_INTERVAL_SECS).await;
});
info!("Channel recycle is started, running in the background!");
@@ -398,7 +398,7 @@ impl Channel {
#[inline]
pub fn increase_access(&self) {
- self.access.fetch_add(1, Ordering::Relaxed);
+ let _ = self.access.fetch_add(1, Ordering::Relaxed);
}
}
@@ -427,7 +427,7 @@ impl Pool {
}
fn put(&self, addr: &str, channel: Channel) {
- self.channels.insert(addr.to_string(), channel);
+ let _ = self.channels.insert(addr.to_string(), channel);
}
fn retain_channel<F>(&self, f: F)
@@ -442,7 +442,7 @@ async fn recycle_channel_in_loop(pool: Arc<Pool>, interval_secs: u64) {
let mut interval = tokio::time::interval(Duration::from_secs(interval_secs));
loop {
- interval.tick().await;
+ let _ = interval.tick().await;
pool.retain_channel(|_, c| c.access.swap(0, Ordering::Relaxed) != 0)
}
}
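
The `spawn_bg` change above (and the matching ones in the heartbeat and handler files later in this diff) binds the spawned task's handle to `_handle` rather than dropping it anonymously. A small sketch of the same idea using plain tokio, since the `common_runtime` wrapper's exact signature is an assumption here:

use std::time::Duration;

#[tokio::main]
async fn main() {
    // Naming the JoinHandle `_handle` documents that the background task is
    // intentionally detached and never awaited.
    let _handle = tokio::spawn(async {
        tokio::time::sleep(Duration::from_millis(10)).await;
    });

    // Keep the runtime alive briefly so the detached task gets to run.
    tokio::time::sleep(Duration::from_millis(50)).await;
}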
diff --git a/src/common/grpc/src/writer.rs b/src/common/grpc/src/writer.rs
index c111d61b665c..75061c5fc6f5 100644
--- a/src/common/grpc/src/writer.rs
+++ b/src/common/grpc/src/writer.rs
@@ -217,7 +217,7 @@ impl LinesWriter {
datatype: datatype as i32,
null_mask: Vec::default(),
});
- column_names.insert(column_name.to_string(), new_idx);
+ let _ = column_names.insert(column_name.to_string(), new_idx);
new_idx
}
};
diff --git a/src/common/mem-prof/src/lib.rs b/src/common/mem-prof/src/lib.rs
index b87503ba7be8..de982ec876d0 100644
--- a/src/common/mem-prof/src/lib.rs
+++ b/src/common/mem-prof/src/lib.rs
@@ -62,7 +62,8 @@ pub async fn dump_profile() -> error::Result<Vec<u8>> {
.await
.context(OpenTempFileSnafu { path: &path })?;
let mut buf = vec![];
- f.read_to_end(&mut buf)
+ let _ = f
+ .read_to_end(&mut buf)
.await
.context(OpenTempFileSnafu { path })?;
Ok(buf)
diff --git a/src/common/meta/src/rpc/router.rs b/src/common/meta/src/rpc/router.rs
index 3ffcb4c92bac..1b463023a811 100644
--- a/src/common/meta/src/rpc/router.rs
+++ b/src/common/meta/src/rpc/router.rs
@@ -202,13 +202,13 @@ impl TableRoute {
.iter()
.filter_map(|x| x.leader_peer.as_ref())
.for_each(|p| {
- peers.insert(p.clone());
+ let _ = peers.insert(p.clone());
});
self.region_routes
.iter()
.flat_map(|x| x.follower_peers.iter())
.for_each(|p| {
- peers.insert(p.clone());
+ let _ = peers.insert(p.clone());
});
let mut peers = peers.into_iter().map(Into::into).collect::<Vec<PbPeer>>();
peers.sort_by_key(|x| x.id);
diff --git a/src/common/procedure/src/local.rs b/src/common/procedure/src/local.rs
index d432bccecc4e..c9366367bd2c 100644
--- a/src/common/procedure/src/local.rs
+++ b/src/common/procedure/src/local.rs
@@ -292,7 +292,7 @@ impl ManagerContext {
fn remove_messages(&self, procedure_ids: &[ProcedureId]) {
let mut messages = self.messages.lock().unwrap();
for procedure_id in procedure_ids {
- messages.remove(procedure_id);
+ let _ = messages.remove(procedure_id);
}
}
@@ -319,7 +319,7 @@ impl ManagerContext {
while let Some((id, finish_time)) = finished_procedures.front() {
if finish_time.elapsed() > ttl {
ids_to_remove.push(*id);
- finished_procedures.pop_front();
+ let _ = finished_procedures.pop_front();
} else {
// The rest procedures are finished later, so we can break
// the loop.
@@ -335,7 +335,7 @@ impl ManagerContext {
let mut procedures = self.procedures.write().unwrap();
for id in ids {
- procedures.remove(&id);
+ let _ = procedures.remove(&id);
}
}
}
@@ -419,7 +419,7 @@ impl LocalManager {
DuplicateProcedureSnafu { procedure_id },
);
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
// Run the root procedure.
runner.run().await;
});
@@ -434,7 +434,7 @@ impl ProcedureManager for LocalManager {
let mut loaders = self.manager_ctx.loaders.lock().unwrap();
ensure!(!loaders.contains_key(name), LoaderConflictSnafu { name });
- loaders.insert(name.to_string(), loader);
+ let _ = loaders.insert(name.to_string(), loader);
Ok(())
}
@@ -559,7 +559,7 @@ mod test_util {
pub(crate) fn new_object_store(dir: &TempDir) -> ObjectStore {
let store_dir = dir.path().to_str().unwrap();
let mut builder = Builder::default();
- builder.root(store_dir);
+ let _ = builder.root(store_dir);
ObjectStore::new(builder).unwrap().finish()
}
}
@@ -770,13 +770,13 @@ mod tests {
let mut procedure = ProcedureToLoad::new("submit");
procedure.lock_key = LockKey::single("test.submit");
- manager
+ assert!(manager
.submit(ProcedureWithId {
id: procedure_id,
procedure: Box::new(procedure),
})
.await
- .unwrap();
+ .is_ok());
assert!(manager
.procedure_state(procedure_id)
.await
@@ -877,13 +877,13 @@ mod tests {
let mut procedure = ProcedureToLoad::new("submit");
procedure.lock_key = LockKey::single("test.submit");
let procedure_id = ProcedureId::random();
- manager
+ assert!(manager
.submit(ProcedureWithId {
id: procedure_id,
procedure: Box::new(procedure),
})
.await
- .unwrap();
+ .is_ok());
let mut watcher = manager.procedure_watcher(procedure_id).unwrap();
watcher.changed().await.unwrap();
manager.start().unwrap();
@@ -899,13 +899,13 @@ mod tests {
let mut procedure = ProcedureToLoad::new("submit");
procedure.lock_key = LockKey::single("test.submit");
let procedure_id = ProcedureId::random();
- manager
+ assert!(manager
.submit(ProcedureWithId {
id: procedure_id,
procedure: Box::new(procedure),
})
.await
- .unwrap();
+ .is_ok());
let mut watcher = manager.procedure_watcher(procedure_id).unwrap();
watcher.changed().await.unwrap();
tokio::time::sleep(Duration::from_millis(10)).await;
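
The test hunks above swap `.unwrap()` on an otherwise unused result for `assert!(... .is_ok())`. A minimal sketch of that assertion style, with a hypothetical `fallible` function standing in for `manager.submit(...)`:

fn fallible(ok: bool) -> Result<u32, String> {
    if ok {
        Ok(42)
    } else {
        Err("boom".to_string())
    }
}

#[cfg(test)]
mod tests {
    use super::fallible;

    #[test]
    fn submit_succeeds() {
        // The Result is consumed by the assertion instead of being unwrapped
        // and discarded.
        assert!(fallible(true).is_ok());
    }
}

One trade-off worth noting: on failure, `assert!(res.is_ok())` panics without printing the error payload, whereas `.unwrap()` includes it in the panic message.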
diff --git a/src/common/procedure/src/local/lock.rs b/src/common/procedure/src/local/lock.rs
index 2a14425da42e..59e197d951bb 100644
--- a/src/common/procedure/src/local/lock.rs
+++ b/src/common/procedure/src/local/lock.rs
@@ -88,7 +88,7 @@ impl LockMap {
// expect that a procedure should not wait for two lock simultaneously.
lock.waiters.push_back(meta.clone());
} else {
- locks.insert(key.to_string(), Lock::from_owner(meta));
+ let _ = locks.insert(key.to_string(), Lock::from_owner(meta));
return;
}
@@ -111,7 +111,7 @@ impl LockMap {
if !lock.switch_owner() {
// No body waits for this lock, we can remove the lock entry.
- locks.remove(key);
+ let _ = locks.remove(key);
}
}
}
diff --git a/src/common/procedure/src/local/runner.rs b/src/common/procedure/src/local/runner.rs
index bc2f874afe04..0708b1838d73 100644
--- a/src/common/procedure/src/local/runner.rs
+++ b/src/common/procedure/src/local/runner.rs
@@ -332,7 +332,7 @@ impl Runner {
// Add the id of the subprocedure to the metadata.
self.meta.push_child(procedure_id);
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
// Run the root procedure.
runner.run().await
});
diff --git a/src/common/procedure/src/store.rs b/src/common/procedure/src/store.rs
index 1f37a980bfa4..154040a85309 100644
--- a/src/common/procedure/src/store.rs
+++ b/src/common/procedure/src/store.rs
@@ -198,7 +198,7 @@ impl ProcedureStore {
entry.1 = value;
}
} else {
- procedure_key_values.insert(curr_key.procedure_id, (curr_key, value));
+ let _ = procedure_key_values.insert(curr_key.procedure_id, (curr_key, value));
}
}
@@ -211,7 +211,7 @@ impl ProcedureStore {
// procedures are loaded.
continue;
};
- messages.insert(procedure_id, message);
+ let _ = messages.insert(procedure_id, message);
} else {
finished_ids.push(procedure_id);
}
@@ -331,7 +331,7 @@ mod tests {
fn procedure_store_for_test(dir: &TempDir) -> ProcedureStore {
let store_dir = dir.path().to_str().unwrap();
let mut builder = Builder::default();
- builder.root(store_dir);
+ let _ = builder.root(store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
ProcedureStore::from_object_store(object_store)
diff --git a/src/common/procedure/src/store/state_store.rs b/src/common/procedure/src/store/state_store.rs
index 22686c80924b..a1ffb42238a0 100644
--- a/src/common/procedure/src/store/state_store.rs
+++ b/src/common/procedure/src/store/state_store.rs
@@ -173,7 +173,7 @@ mod tests {
let dir = create_temp_dir("state_store");
let store_dir = dir.path().to_str().unwrap();
let mut builder = Builder::default();
- builder.root(store_dir);
+ let _ = builder.root(store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let state_store = ObjectStateStore::new(object_store);
@@ -244,7 +244,7 @@ mod tests {
let dir = create_temp_dir("state_store_list");
let store_dir = dir.path().to_str().unwrap();
let mut builder = Builder::default();
- builder.root(store_dir);
+ let _ = builder.root(store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let state_store = ObjectStateStore::new(object_store);
diff --git a/src/common/recordbatch/src/recordbatch.rs b/src/common/recordbatch/src/recordbatch.rs
index c524840ff5c6..979bc5fd0e3c 100644
--- a/src/common/recordbatch/src/recordbatch.rs
+++ b/src/common/recordbatch/src/recordbatch.rs
@@ -164,7 +164,7 @@ impl RecordBatch {
vector.clone()
};
- vectors.insert(column_name.clone(), vector);
+ let _ = vectors.insert(column_name.clone(), vector);
}
Ok(vectors)
diff --git a/src/common/runtime/src/repeated_task.rs b/src/common/runtime/src/repeated_task.rs
index b5abd8e0d2d2..354c71054dad 100644
--- a/src/common/runtime/src/repeated_task.rs
+++ b/src/common/runtime/src/repeated_task.rs
@@ -172,8 +172,7 @@ mod tests {
}
async fn call(&mut self) -> Result<()> {
- self.n.fetch_add(1, Ordering::Relaxed);
-
+ let _ = self.n.fetch_add(1, Ordering::Relaxed);
Ok(())
}
}
diff --git a/src/common/runtime/src/runtime.rs b/src/common/runtime/src/runtime.rs
index 1112b9acb9e9..e5a044ede9c0 100644
--- a/src/common/runtime/src/runtime.rs
+++ b/src/common/runtime/src/runtime.rs
@@ -47,7 +47,7 @@ pub struct Dropper {
impl Drop for Dropper {
fn drop(&mut self) {
// Send a signal to say i am dropping.
- self.close.take().map(|v| v.send(()));
+ let _ = self.close.take().map(|v| v.send(()));
}
}
@@ -104,7 +104,7 @@ impl Builder {
///
/// This can be any number above 0. The default value is the number of cores available to the system.
pub fn worker_threads(&mut self, val: usize) -> &mut Self {
- self.builder.worker_threads(val);
+ let _ = self.builder.worker_threads(val);
self
}
@@ -114,7 +114,7 @@ impl Builder {
/// they are not always active and will exit if left idle for too long, You can change this timeout duration
/// with thread_keep_alive. The default value is 512.
pub fn max_blocking_threads(&mut self, val: usize) -> &mut Self {
- self.builder.max_blocking_threads(val);
+ let _ = self.builder.max_blocking_threads(val);
self
}
@@ -122,7 +122,7 @@ impl Builder {
///
/// By default, the timeout for a thread is set to 10 seconds.
pub fn thread_keep_alive(&mut self, duration: Duration) -> &mut Self {
- self.builder.thread_keep_alive(duration);
+ let _ = self.builder.thread_keep_alive(duration);
self
}
@@ -227,7 +227,7 @@ mod tests {
// wait threads created
thread::sleep(Duration::from_millis(50));
- runtime.spawn(async {
+ let _handle = runtime.spawn(async {
thread::sleep(Duration::from_millis(50));
});
@@ -247,7 +247,7 @@ mod tests {
let out = runtime.block_on(async {
let (tx, rx) = oneshot::channel();
- thread::spawn(move || {
+ let _ = thread::spawn(move || {
thread::sleep(Duration::from_millis(50));
tx.send("ZOMG").unwrap();
});
diff --git a/src/common/telemetry/src/panic_hook.rs b/src/common/telemetry/src/panic_hook.rs
index ca6881032118..d7a432c950cd 100644
--- a/src/common/telemetry/src/panic_hook.rs
+++ b/src/common/telemetry/src/panic_hook.rs
@@ -46,7 +46,7 @@ pub fn set_panic_hook() {
}));
#[cfg(feature = "deadlock_detection")]
- std::thread::spawn(move || loop {
+ let _ = std::thread::spawn(move || loop {
std::thread::sleep(Duration::from_secs(5));
let deadlocks = parking_lot::deadlock::check_deadlock();
if deadlocks.is_empty() {
diff --git a/src/common/time/src/timestamp.rs b/src/common/time/src/timestamp.rs
index 0d4b546b7557..f01a82cb0067 100644
--- a/src/common/time/src/timestamp.rs
+++ b/src/common/time/src/timestamp.rs
@@ -879,14 +879,14 @@ mod tests {
#[test]
fn test_split_overflow() {
- Timestamp::new(i64::MAX, TimeUnit::Second).split();
- Timestamp::new(i64::MIN, TimeUnit::Second).split();
- Timestamp::new(i64::MAX, TimeUnit::Millisecond).split();
- Timestamp::new(i64::MIN, TimeUnit::Millisecond).split();
- Timestamp::new(i64::MAX, TimeUnit::Microsecond).split();
- Timestamp::new(i64::MIN, TimeUnit::Microsecond).split();
- Timestamp::new(i64::MAX, TimeUnit::Nanosecond).split();
- Timestamp::new(i64::MIN, TimeUnit::Nanosecond).split();
+ let _ = Timestamp::new(i64::MAX, TimeUnit::Second).split();
+ let _ = Timestamp::new(i64::MIN, TimeUnit::Second).split();
+ let _ = Timestamp::new(i64::MAX, TimeUnit::Millisecond).split();
+ let _ = Timestamp::new(i64::MIN, TimeUnit::Millisecond).split();
+ let _ = Timestamp::new(i64::MAX, TimeUnit::Microsecond).split();
+ let _ = Timestamp::new(i64::MIN, TimeUnit::Microsecond).split();
+ let _ = Timestamp::new(i64::MAX, TimeUnit::Nanosecond).split();
+ let _ = Timestamp::new(i64::MIN, TimeUnit::Nanosecond).split();
let (sec, nsec) = Timestamp::new(i64::MIN, TimeUnit::Nanosecond).split();
let time = NaiveDateTime::from_timestamp_opt(sec, nsec).unwrap();
assert_eq!(sec, time.timestamp());
diff --git a/src/datanode/src/heartbeat.rs b/src/datanode/src/heartbeat.rs
index 6c9e3e036513..39240512b53f 100644
--- a/src/datanode/src/heartbeat.rs
+++ b/src/datanode/src/heartbeat.rs
@@ -89,7 +89,7 @@ impl HeartbeatTask {
let client_id = meta_client.id();
let (tx, mut rx) = meta_client.heartbeat().await.context(MetaClientInitSnafu)?;
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
while let Some(res) = match rx.message().await {
Ok(m) => m,
Err(e) => {
@@ -160,7 +160,7 @@ impl HeartbeatTask {
.await?;
let epoch = self.region_alive_keepers.epoch();
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
let sleep = tokio::time::sleep(Duration::from_millis(0));
tokio::pin!(sleep);
diff --git a/src/datanode/src/heartbeat/handler/close_region.rs b/src/datanode/src/heartbeat/handler/close_region.rs
index 6ae4a2a6fd6a..aa3f9a57062d 100644
--- a/src/datanode/src/heartbeat/handler/close_region.rs
+++ b/src/datanode/src/heartbeat/handler/close_region.rs
@@ -56,7 +56,7 @@ impl HeartbeatResponseHandler for CloseRegionHandler {
let mailbox = ctx.mailbox.clone();
let self_ref = Arc::new(self.clone());
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
let result = self_ref.close_region_inner(region_ident).await;
if let Err(e) = mailbox
@@ -191,7 +191,8 @@ impl CloseRegionHandler {
// Deregister table if The table released.
self.deregister_table(table_ref).await?;
- self.region_alive_keepers
+ let _ = self
+ .region_alive_keepers
.deregister_table(table_ident)
.await;
diff --git a/src/datanode/src/heartbeat/handler/open_region.rs b/src/datanode/src/heartbeat/handler/open_region.rs
index e56116a48ff5..a038b85254ab 100644
--- a/src/datanode/src/heartbeat/handler/open_region.rs
+++ b/src/datanode/src/heartbeat/handler/open_region.rs
@@ -58,7 +58,7 @@ impl HeartbeatResponseHandler for OpenRegionHandler {
let self_ref = Arc::new(self.clone());
let region_alive_keepers = self.region_alive_keepers.clone();
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
            let table_ident = &region_ident.table_ident;
let request = OpenTableRequest {
catalog_name: table_ident.catalog.clone(),
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 1b23e13b4462..148f4fb802f5 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -177,21 +177,21 @@ impl Instance {
object_store.clone(),
));
- let mut engine_procedures = HashMap::with_capacity(2);
- engine_procedures.insert(
- mito_engine.name().to_string(),
- mito_engine.clone() as TableEngineProcedureRef,
- );
-
let immutable_file_engine = Arc::new(ImmutableFileTableEngine::new(
file_table_engine::config::EngineConfig::default(),
object_store.clone(),
));
- engine_procedures.insert(
- immutable_file_engine.name().to_string(),
- immutable_file_engine.clone() as TableEngineProcedureRef,
- );
+ let engine_procedures = HashMap::from([
+ (
+ mito_engine.name().to_string(),
+ mito_engine.clone() as TableEngineProcedureRef,
+ ),
+ (
+ immutable_file_engine.name().to_string(),
+ immutable_file_engine.clone() as TableEngineProcedureRef,
+ ),
+ ]);
let engine_manager = Arc::new(
MemoryTableEngineManager::with(vec![
mito_engine.clone(),
@@ -207,7 +207,7 @@ impl Instance {
let catalog = Arc::new(catalog::local::MemoryCatalogManager::default());
let table = NumbersTable::new(MIN_USER_TABLE_ID);
- catalog
+ let _ = catalog
.register_table(RegisterTableRequest {
table_id: MIN_USER_TABLE_ID,
table_name: table.table_info().name.to_string(),
@@ -376,7 +376,7 @@ impl Instance {
.map_err(BoxedError::new)
.context(ShutdownInstanceSnafu);
info!("Flushed all tables result: {}", flush_result.is_ok());
- flush_result?;
+ let _ = flush_result?;
Ok(())
}
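
The `engine_procedures` rewrite above replaces a mutable map plus repeated `insert` calls (each returning an `Option` that was dropped) with a single `HashMap::from` initializer. A tiny sketch with placeholder keys and values:

use std::collections::HashMap;

fn main() {
    // Equivalent in spirit to the `engine_procedures` construction above,
    // but with plain string values instead of engine trait objects.
    let engines: HashMap<String, &str> = HashMap::from([
        ("mito".to_string(), "mito engine"),
        ("file".to_string(), "immutable file engine"),
    ]);
    assert_eq!(engines.len(), 2);
}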
diff --git a/src/datanode/src/instance/grpc.rs b/src/datanode/src/instance/grpc.rs
index adc3dbc23b6d..cae4adf66b0f 100644
--- a/src/datanode/src/instance/grpc.rs
+++ b/src/datanode/src/instance/grpc.rs
@@ -281,11 +281,11 @@ async fn new_dummy_catalog_list(
)
.await?;
let catalog_provider = MemoryCatalogProvider::new();
- catalog_provider
+ assert!(catalog_provider
.register_schema(schema_name, Arc::new(schema_provider) as Arc<_>)
- .unwrap();
+ .is_ok());
let catalog_list = MemoryCatalogList::new();
- catalog_list.register_catalog(
+ let _ = catalog_list.register_catalog(
catalog_name.to_string(),
Arc::new(catalog_provider) as Arc<_>,
);
@@ -438,9 +438,12 @@ mod test {
async fn test_handle_insert() {
let instance = MockInstance::new("test_handle_insert").await;
let instance = instance.inner();
- test_util::create_test_table(instance, ConcreteDataType::timestamp_millisecond_datatype())
- .await
- .unwrap();
+ assert!(test_util::create_test_table(
+ instance,
+ ConcreteDataType::timestamp_millisecond_datatype()
+ )
+ .await
+ .is_ok());
let insert = InsertRequest {
table_name: "demo".to_string(),
@@ -508,9 +511,12 @@ mod test {
async fn test_handle_delete() {
let instance = MockInstance::new("test_handle_delete").await;
let instance = instance.inner();
- test_util::create_test_table(instance, ConcreteDataType::timestamp_millisecond_datatype())
- .await
- .unwrap();
+ assert!(test_util::create_test_table(
+ instance,
+ ConcreteDataType::timestamp_millisecond_datatype()
+ )
+ .await
+ .is_ok());
let query = GrpcRequest::Query(QueryRequest {
query: Some(Query::Sql(
@@ -574,9 +580,12 @@ mod test {
async fn test_handle_query() {
let instance = MockInstance::new("test_handle_query").await;
let instance = instance.inner();
- test_util::create_test_table(instance, ConcreteDataType::timestamp_millisecond_datatype())
- .await
- .unwrap();
+ assert!(test_util::create_test_table(
+ instance,
+ ConcreteDataType::timestamp_millisecond_datatype()
+ )
+ .await
+ .is_ok());
let query = GrpcRequest::Query(QueryRequest {
query: Some(Query::Sql(
diff --git a/src/datanode/src/server.rs b/src/datanode/src/server.rs
index db0c41e466db..64bed8eb3486 100644
--- a/src/datanode/src/server.rs
+++ b/src/datanode/src/server.rs
@@ -72,7 +72,7 @@ impl Services {
})?;
let grpc = self.grpc_server.start(grpc_addr);
let http = self.http_server.start(http_addr);
- future::try_join_all(vec![grpc, http])
+ let _ = future::try_join_all(vec![grpc, http])
.await
.context(StartServerSnafu)?;
diff --git a/src/datanode/src/sql/create.rs b/src/datanode/src/sql/create.rs
index 36b9a9aa429f..b5e7fb3b7d07 100644
--- a/src/datanode/src/sql/create.rs
+++ b/src/datanode/src/sql/create.rs
@@ -63,7 +63,8 @@ impl SqlHandler {
catalog,
schema: schema.clone(),
};
- self.catalog_manager
+ let _ = self
+ .catalog_manager
.register_schema(reg_req)
.await
.context(RegisterSchemaSnafu)?;
diff --git a/src/datanode/src/sql/create_external.rs b/src/datanode/src/sql/create_external.rs
index 275398dbcab0..a42212b3befc 100644
--- a/src/datanode/src/sql/create_external.rs
+++ b/src/datanode/src/sql/create_external.rs
@@ -38,7 +38,7 @@ impl SqlHandler {
.context(error::PrepareImmutableTableSnafu)?;
let meta = ImmutableFileTableOptions { files };
- options.insert(
+ let _ = options.insert(
IMMUTABLE_TABLE_META_KEY.to_string(),
serde_json::to_string(&meta).context(error::EncodeJsonSnafu)?,
);
diff --git a/src/datanode/src/sql/flush_table.rs b/src/datanode/src/sql/flush_table.rs
index 3951caa944ab..bfdc76fe72f9 100644
--- a/src/datanode/src/sql/flush_table.rs
+++ b/src/datanode/src/sql/flush_table.rs
@@ -38,7 +38,7 @@ impl SqlHandler {
.table_names(&req.catalog_name, &req.schema_name)
.await
.context(CatalogSnafu)?;
- futures::future::join_all(all_table_names.iter().map(|table| {
+ let _ = futures::future::join_all(all_table_names.iter().map(|table| {
self.flush_table_inner(
&self.catalog_manager,
&req.catalog_name,
diff --git a/src/datanode/src/store/azblob.rs b/src/datanode/src/store/azblob.rs
index 40497fd38c56..9c35c04117ca 100644
--- a/src/datanode/src/store/azblob.rs
+++ b/src/datanode/src/store/azblob.rs
@@ -30,7 +30,7 @@ pub(crate) async fn new_azblob_object_store(azblob_config: &AzblobConfig) -> Res
);
let mut builder = AzureBuilder::default();
- builder
+ let _ = builder
.root(&root)
.container(&azblob_config.container)
.endpoint(&azblob_config.endpoint)
@@ -38,7 +38,7 @@ pub(crate) async fn new_azblob_object_store(azblob_config: &AzblobConfig) -> Res
.account_key(azblob_config.account_key.expose_secret());
if let Some(token) = &azblob_config.sas_token {
- builder.sas_token(token);
+ let _ = builder.sas_token(token);
}
Ok(ObjectStore::new(builder)
diff --git a/src/datanode/src/store/fs.rs b/src/datanode/src/store/fs.rs
index 34af23c9f77a..bb5c64d30867 100644
--- a/src/datanode/src/store/fs.rs
+++ b/src/datanode/src/store/fs.rs
@@ -33,7 +33,7 @@ pub(crate) async fn new_fs_object_store(file_config: &FileConfig) -> Result<Obje
store::clean_temp_dir(&atomic_write_dir)?;
let mut builder = FsBuilder::default();
- builder.root(&data_home).atomic_write_dir(&atomic_write_dir);
+ let _ = builder.root(&data_home).atomic_write_dir(&atomic_write_dir);
let object_store = ObjectStore::new(builder)
.context(error::InitBackendSnafu)?
diff --git a/src/datanode/src/store/oss.rs b/src/datanode/src/store/oss.rs
index c7f12ff352a2..d689313305f7 100644
--- a/src/datanode/src/store/oss.rs
+++ b/src/datanode/src/store/oss.rs
@@ -29,7 +29,7 @@ pub(crate) async fn new_oss_object_store(oss_config: &OssConfig) -> Result<Objec
);
let mut builder = OSSBuilder::default();
- builder
+ let _ = builder
.root(&root)
.bucket(&oss_config.bucket)
.endpoint(&oss_config.endpoint)
diff --git a/src/datanode/src/store/s3.rs b/src/datanode/src/store/s3.rs
index 14b33c25cb42..3b65fbb200df 100644
--- a/src/datanode/src/store/s3.rs
+++ b/src/datanode/src/store/s3.rs
@@ -30,17 +30,17 @@ pub(crate) async fn new_s3_object_store(s3_config: &S3Config) -> Result<ObjectSt
);
let mut builder = S3Builder::default();
- builder
+ let _ = builder
.root(&root)
.bucket(&s3_config.bucket)
.access_key_id(s3_config.access_key_id.expose_secret())
.secret_access_key(s3_config.secret_access_key.expose_secret());
if s3_config.endpoint.is_some() {
- builder.endpoint(s3_config.endpoint.as_ref().unwrap());
+ let _ = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
}
if s3_config.region.is_some() {
- builder.region(s3_config.region.as_ref().unwrap());
+ let _ = builder.region(s3_config.region.as_ref().unwrap());
}
Ok(ObjectStore::new(builder)
diff --git a/src/datanode/src/tests.rs b/src/datanode/src/tests.rs
index 5b4ba4de3d5f..d0c4e23d660c 100644
--- a/src/datanode/src/tests.rs
+++ b/src/datanode/src/tests.rs
@@ -71,7 +71,7 @@ async fn test_close_region_handler() {
),
)]));
- prepare_table(instance.inner()).await;
+ let _ = prepare_table(instance.inner()).await;
// Closes demo table
handle_instruction(
diff --git a/src/datanode/src/tests/test_util.rs b/src/datanode/src/tests/test_util.rs
index 9be758439eb8..6f606658f0fc 100644
--- a/src/datanode/src/tests/test_util.rs
+++ b/src/datanode/src/tests/test_util.rs
@@ -136,6 +136,6 @@ pub(crate) async fn create_test_table(
table_id: table.table_info().ident.table_id,
table: table.clone(),
};
- instance.catalog_manager.register_table(req).await.unwrap();
+ assert!(instance.catalog_manager.register_table(req).await.is_ok());
Ok(table)
}
diff --git a/src/datatypes/src/schema.rs b/src/datatypes/src/schema.rs
index 525ff5e10b0d..b79641b9368a 100644
--- a/src/datatypes/src/schema.rs
+++ b/src/datatypes/src/schema.rs
@@ -202,7 +202,7 @@ impl SchemaBuilder {
///
/// Old metadata with same key would be overwritten.
pub fn add_metadata(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
- self.metadata.insert(key.into(), value.into());
+ let _ = self.metadata.insert(key.into(), value.into());
self
}
@@ -211,7 +211,8 @@ impl SchemaBuilder {
validate_timestamp_index(&self.column_schemas, timestamp_index)?;
}
- self.metadata
+ let _ = self
+ .metadata
.insert(VERSION_KEY.to_string(), self.version.to_string());
let arrow_schema = ArrowSchema::new(self.fields).with_metadata(self.metadata);
@@ -242,7 +243,7 @@ fn collect_fields(column_schemas: &[ColumnSchema]) -> Result<FieldsAndIndices> {
}
let field = Field::try_from(column_schema)?;
fields.push(field);
- name_to_index.insert(column_schema.name.clone(), index);
+ let _ = name_to_index.insert(column_schema.name.clone(), index);
}
Ok(FieldsAndIndices {
@@ -287,7 +288,7 @@ impl TryFrom<Arc<ArrowSchema>> for Schema {
let mut name_to_index = HashMap::with_capacity(arrow_schema.fields.len());
for field in &arrow_schema.fields {
let column_schema = ColumnSchema::try_from(field.as_ref())?;
- name_to_index.insert(field.name().to_string(), column_schemas.len());
+ let _ = name_to_index.insert(field.name().to_string(), column_schemas.len());
column_schemas.push(column_schema);
}
diff --git a/src/datatypes/src/schema/column_schema.rs b/src/datatypes/src/schema/column_schema.rs
index 35ba2cd49be4..04a9a63062ff 100644
--- a/src/datatypes/src/schema/column_schema.rs
+++ b/src/datatypes/src/schema/column_schema.rs
@@ -87,10 +87,11 @@ impl ColumnSchema {
pub fn with_time_index(mut self, is_time_index: bool) -> Self {
self.is_time_index = is_time_index;
if is_time_index {
- self.metadata
+ let _ = self
+ .metadata
.insert(TIME_INDEX_KEY.to_string(), "true".to_string());
} else {
- self.metadata.remove(TIME_INDEX_KEY);
+ let _ = self.metadata.remove(TIME_INDEX_KEY);
}
self
}
@@ -266,8 +267,7 @@ mod tests {
#[test]
fn test_column_schema_with_metadata() {
- let mut metadata = Metadata::new();
- metadata.insert("k1".to_string(), "v1".to_string());
+ let metadata = Metadata::from([("k1".to_string(), "v1".to_string())]);
let column_schema = ColumnSchema::new("test", ConcreteDataType::int32_datatype(), true)
.with_metadata(metadata)
.with_default_constraint(Some(ColumnDefaultConstraint::null_value()))
@@ -288,20 +288,21 @@ mod tests {
#[test]
fn test_column_schema_with_duplicate_metadata() {
- let mut metadata = Metadata::new();
- metadata.insert(DEFAULT_CONSTRAINT_KEY.to_string(), "v1".to_string());
+ let metadata = Metadata::from([(DEFAULT_CONSTRAINT_KEY.to_string(), "v1".to_string())]);
let column_schema = ColumnSchema::new("test", ConcreteDataType::int32_datatype(), true)
.with_metadata(metadata)
.with_default_constraint(Some(ColumnDefaultConstraint::null_value()))
.unwrap();
- Field::try_from(&column_schema).unwrap_err();
+ assert!(Field::try_from(&column_schema).is_err());
}
#[test]
fn test_column_schema_invalid_default_constraint() {
- ColumnSchema::new("test", ConcreteDataType::int32_datatype(), false)
- .with_default_constraint(Some(ColumnDefaultConstraint::null_value()))
- .unwrap_err();
+ assert!(
+ ColumnSchema::new("test", ConcreteDataType::int32_datatype(), false)
+ .with_default_constraint(Some(ColumnDefaultConstraint::null_value()))
+ .is_err()
+ );
}
#[test]
diff --git a/src/datatypes/src/schema/constraint.rs b/src/datatypes/src/schema/constraint.rs
index 63a1a3e07861..e63d8748fceb 100644
--- a/src/datatypes/src/schema/constraint.rs
+++ b/src/datatypes/src/schema/constraint.rs
@@ -196,7 +196,7 @@ mod tests {
fn test_validate_null_constraint() {
let constraint = ColumnDefaultConstraint::null_value();
let data_type = ConcreteDataType::int32_datatype();
- constraint.validate(&data_type, false).unwrap_err();
+ assert!(constraint.validate(&data_type, false).is_err());
constraint.validate(&data_type, true).unwrap();
}
@@ -207,9 +207,9 @@ mod tests {
constraint.validate(&data_type, false).unwrap();
constraint.validate(&data_type, true).unwrap();
- constraint
+ assert!(constraint
.validate(&ConcreteDataType::uint32_datatype(), true)
- .unwrap_err();
+ .is_err());
}
#[test]
@@ -218,23 +218,23 @@ mod tests {
constraint
.validate(&ConcreteDataType::timestamp_millisecond_datatype(), false)
.unwrap();
- constraint
+ assert!(constraint
.validate(&ConcreteDataType::boolean_datatype(), false)
- .unwrap_err();
+ .is_err());
let constraint = ColumnDefaultConstraint::Function("hello()".to_string());
- constraint
+ assert!(constraint
.validate(&ConcreteDataType::timestamp_millisecond_datatype(), false)
- .unwrap_err();
+ .is_err());
}
#[test]
fn test_create_default_vector_by_null() {
let constraint = ColumnDefaultConstraint::null_value();
let data_type = ConcreteDataType::int32_datatype();
- constraint
+ assert!(constraint
.create_default_vector(&data_type, false, 10)
- .unwrap_err();
+ .is_err());
let constraint = ColumnDefaultConstraint::null_value();
let v = constraint
@@ -286,9 +286,9 @@ mod tests {
let constraint = ColumnDefaultConstraint::Function("no".to_string());
let data_type = ConcreteDataType::timestamp_millisecond_datatype();
- constraint
+ assert!(constraint
.create_default_vector(&data_type, false, 4)
- .unwrap_err();
+ .is_err());
}
#[test]
diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs
index 319da1066d00..d436c3934c3e 100644
--- a/src/datatypes/src/value.rs
+++ b/src/datatypes/src/value.rs
@@ -1006,10 +1006,10 @@ mod tests {
);
let result: Result<Value> = ScalarValue::Decimal128(Some(1), 0, 0).try_into();
- result
+ assert!(result
.unwrap_err()
.to_string()
- .contains("Unsupported arrow data type, type: Decimal(0, 0)");
+ .contains("Unsupported arrow data type, type: Decimal128(0, 0)"));
}
#[test]
diff --git a/src/datatypes/src/vectors/null.rs b/src/datatypes/src/vectors/null.rs
index d23862ac0a7b..c10a1b3dc55c 100644
--- a/src/datatypes/src/vectors/null.rs
+++ b/src/datatypes/src/vectors/null.rs
@@ -176,7 +176,7 @@ impl MutableVector for NullVectorBuilder {
}
fn extend_slice_of(&mut self, vector: &dyn Vector, offset: usize, length: usize) -> Result<()> {
- vector
+ let _ = vector
.as_any()
.downcast_ref::<NullVector>()
.with_context(|| error::CastTypeSnafu {
diff --git a/src/datatypes/src/vectors/operations/take.rs b/src/datatypes/src/vectors/operations/take.rs
index d457a1dbe366..87c36ec7c341 100644
--- a/src/datatypes/src/vectors/operations/take.rs
+++ b/src/datatypes/src/vectors/operations/take.rs
@@ -148,7 +148,7 @@ mod tests {
fn test_take_out_of_index() {
let v = Int32Vector::from_slice([1, 2, 3, 4, 5]);
let indies = UInt32Vector::from_slice([1, 5, 6]);
- v.take(&indies).unwrap();
+ let _ = v.take(&indies);
}
#[test]
diff --git a/src/file-table-engine/src/engine/immutable.rs b/src/file-table-engine/src/engine/immutable.rs
index 0101824b8ca0..e2c23b1425e3 100644
--- a/src/file-table-engine/src/engine/immutable.rs
+++ b/src/file-table-engine/src/engine/immutable.rs
@@ -275,7 +275,7 @@ impl EngineInner {
table_id
);
- self.tables.write().unwrap().insert(table_id, table.clone());
+ let _ = self.tables.write().unwrap().insert(table_id, table.clone());
Ok(table)
}
@@ -339,7 +339,7 @@ impl EngineInner {
.context(table_error::TableOperationSnafu)?,
);
- self.tables.write().unwrap().insert(table_id, table.clone());
+ let _ = self.tables.write().unwrap().insert(table_id, table.clone());
Some(table as _)
};
@@ -375,7 +375,7 @@ impl EngineInner {
.context(DropTableSnafu {
table_name: &table_full_name,
})?;
- self.tables.write().unwrap().remove(&req.table_id);
+ let _ = self.tables.write().unwrap().remove(&req.table_id);
Ok(true)
} else {
@@ -388,7 +388,7 @@ impl EngineInner {
let tables = self.tables.read().unwrap().clone();
- futures::future::try_join_all(tables.values().map(|t| t.close(&[])))
+ let _ = futures::future::try_join_all(tables.values().map(|t| t.close(&[])))
.await
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)?;
@@ -427,7 +427,7 @@ impl EngineInner {
.context(table_error::TableOperationSnafu)?;
}
- self.tables.write().unwrap().remove(&table_id);
+ let _ = self.tables.write().unwrap().remove(&table_id);
Ok(())
}
diff --git a/src/file-table-engine/src/engine/procedure/create.rs b/src/file-table-engine/src/engine/procedure/create.rs
index b99aacea1781..fdb660b81fee 100644
--- a/src/file-table-engine/src/engine/procedure/create.rs
+++ b/src/file-table-engine/src/engine/procedure/create.rs
@@ -118,7 +118,8 @@ impl CreateImmutableFileTable {
return Ok(Status::Done);
}
- self.engine
+ let _ = self
+ .engine
.create_table(&engine_ctx, self.data.request.clone())
.await
.map_err(Error::from_error_ext)?;
diff --git a/src/file-table-engine/src/engine/procedure/drop.rs b/src/file-table-engine/src/engine/procedure/drop.rs
index 274a5d8c0db3..c4d16b738f30 100644
--- a/src/file-table-engine/src/engine/procedure/drop.rs
+++ b/src/file-table-engine/src/engine/procedure/drop.rs
@@ -42,7 +42,8 @@ impl Procedure for DropImmutableFileTable {
let engine_ctx = EngineContext::default();
// Currently, `drop_table()` of ImmutableFileTableEngine is idempotent so we just
// invoke it.
- self.engine
+ let _ = self
+ .engine
.drop_table(&engine_ctx, self.data.request.clone())
.await
.map_err(Error::from_error_ext)?;
diff --git a/src/file-table-engine/src/test_util.rs b/src/file-table-engine/src/test_util.rs
index 8b908424bd96..5eb561993907 100644
--- a/src/file-table-engine/src/test_util.rs
+++ b/src/file-table-engine/src/test_util.rs
@@ -36,7 +36,7 @@ pub fn new_test_object_store(prefix: &str) -> (TempDir, ObjectStore) {
let dir = create_temp_dir(prefix);
let store_dir = dir.path().to_string_lossy();
let mut builder = Fs::default();
- builder.root(&store_dir);
+ let _ = builder.root(&store_dir);
(dir, ObjectStore::new(builder).unwrap().finish())
}
@@ -97,15 +97,15 @@ pub struct TestEngineComponents {
pub fn new_create_request(schema: SchemaRef) -> CreateTableRequest {
let mut table_options = TableOptions::default();
- table_options.extra_options.insert(
+ let _ = table_options.extra_options.insert(
requests::IMMUTABLE_TABLE_LOCATION_KEY.to_string(),
"mock_path".to_string(),
);
- table_options.extra_options.insert(
+ let _ = table_options.extra_options.insert(
requests::IMMUTABLE_TABLE_META_KEY.to_string(),
serde_json::to_string(&ImmutableFileTableOptions::default()).unwrap(),
);
- table_options.extra_options.insert(
+ let _ = table_options.extra_options.insert(
requests::IMMUTABLE_TABLE_FORMAT_KEY.to_string(),
"csv".to_string(),
);
diff --git a/src/frontend/src/catalog.rs b/src/frontend/src/catalog.rs
index 05d6b2e8281a..69aa5cf0cc4c 100644
--- a/src/frontend/src/catalog.rs
+++ b/src/frontend/src/catalog.rs
@@ -257,7 +257,7 @@ impl CatalogManager for FrontendCatalogManager {
let Kv(k, _) = r?;
let catalog_key = String::from_utf8_lossy(&k);
if let Ok(key) = CatalogKey::parse(catalog_key.as_ref()) {
- res.insert(key.catalog_name);
+ let _ = res.insert(key.catalog_name);
} else {
warn!("invalid catalog key: {:?}", catalog_key);
}
@@ -273,7 +273,7 @@ impl CatalogManager for FrontendCatalogManager {
let Kv(k, _) = r?;
let key =
SchemaKey::parse(String::from_utf8_lossy(&k)).context(InvalidCatalogValueSnafu)?;
- res.insert(key.schema_name);
+ let _ = res.insert(key.schema_name);
}
Ok(res.into_iter().collect())
}
diff --git a/src/frontend/src/expr_factory.rs b/src/frontend/src/expr_factory.rs
index fbc8d6926b95..5eafb7acfbe9 100644
--- a/src/frontend/src/expr_factory.rs
+++ b/src/frontend/src/expr_factory.rs
@@ -99,7 +99,7 @@ pub(crate) async fn create_external_expr(
.context(error::PrepareImmutableTableSnafu)?;
let meta = ImmutableFileTableOptions { files };
- options.insert(
+ let _ = options.insert(
IMMUTABLE_TABLE_META_KEY.to_string(),
serde_json::to_string(&meta).context(error::EncodeJsonSnafu)?,
);
diff --git a/src/frontend/src/heartbeat.rs b/src/frontend/src/heartbeat.rs
index edf608573bba..3860f93bcc56 100644
--- a/src/frontend/src/heartbeat.rs
+++ b/src/frontend/src/heartbeat.rs
@@ -78,7 +78,7 @@ impl HeartbeatTask {
let capture_self = self.clone();
let retry_interval = self.retry_interval;
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
loop {
match resp_stream.message().await {
Ok(Some(resp)) => {
@@ -109,7 +109,7 @@ impl HeartbeatTask {
) {
let report_interval = self.report_interval;
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
let sleep = tokio::time::sleep(Duration::from_millis(0));
tokio::pin!(sleep);
diff --git a/src/frontend/src/heartbeat/handler/invalidate_table_cache.rs b/src/frontend/src/heartbeat/handler/invalidate_table_cache.rs
index e728a1f93953..38de5c20e6cd 100644
--- a/src/frontend/src/heartbeat/handler/invalidate_table_cache.rs
+++ b/src/frontend/src/heartbeat/handler/invalidate_table_cache.rs
@@ -55,7 +55,7 @@ impl HeartbeatResponseHandler for InvalidateTableCacheHandler {
..
} = table_ident;
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
self_ref.invalidate_table(&catalog, &schema, &table).await;
if let Err(e) = mailbox
diff --git a/src/frontend/src/heartbeat/handler/tests.rs b/src/frontend/src/heartbeat/handler/tests.rs
index e80b52ae77b5..411da893774b 100644
--- a/src/frontend/src/heartbeat/handler/tests.rs
+++ b/src/frontend/src/heartbeat/handler/tests.rs
@@ -39,7 +39,7 @@ pub struct MockKvCacheInvalidator {
#[async_trait::async_trait]
impl KvCacheInvalidator for MockKvCacheInvalidator {
async fn invalidate_key(&self, key: &[u8]) {
- self.inner.lock().unwrap().remove(key);
+ let _ = self.inner.lock().unwrap().remove(key);
}
}
@@ -50,7 +50,7 @@ pub struct MockTableRouteCacheInvalidator {
#[async_trait::async_trait]
impl TableRouteCacheInvalidator for MockTableRouteCacheInvalidator {
async fn invalidate_table_route(&self, table: &TableName) {
- self.inner.lock().unwrap().remove(&table.to_string());
+ let _ = self.inner.lock().unwrap().remove(&table.to_string());
}
}
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index ed70a2467190..cf80e10c713f 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -324,7 +324,8 @@ impl Instance {
"Table {}.{}.{} does not exist, try create table",
catalog_name, schema_name, table_name,
);
- self.create_table_by_columns(ctx, table_name, columns, MITO_ENGINE)
+ let _ = self
+ .create_table_by_columns(ctx, table_name, columns, MITO_ENGINE)
.await?;
info!(
"Successfully created table on insertion: {}.{}.{}",
@@ -343,7 +344,8 @@ impl Instance {
"Find new columns {:?} on insertion, try to alter table: {}.{}.{}",
add_columns, catalog_name, schema_name, table_name
);
- self.add_new_columns_to_table(ctx, table_name, add_columns)
+ let _ = self
+ .add_new_columns_to_table(ctx, table_name, add_columns)
.await?;
info!(
"Successfully altered table on insertion: {}.{}.{}",
@@ -810,10 +812,10 @@ mod tests {
}
fn do_fmt(template: &str, catalog: &str, schema: &str) -> String {
- let mut vars = HashMap::new();
- vars.insert("catalog".to_string(), catalog);
- vars.insert("schema".to_string(), schema);
-
+ let vars = HashMap::from([
+ ("catalog".to_string(), catalog),
+ ("schema".to_string(), schema),
+ ]);
template.format(&vars).unwrap()
}
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index cd976bf857ec..f0c961704933 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -213,7 +213,7 @@ impl DistInstance {
);
let _timer = common_telemetry::timer!(crate::metrics::DIST_CREATE_TABLE_IN_DATANODE);
- client
+ let _ = client
.create(create_expr_for_region)
.await
.context(RequestDatanodeSnafu)?;
@@ -277,7 +277,7 @@ impl DistInstance {
let client = self.datanode_clients.get_client(&datanode).await;
let client = Database::new(&expr.catalog_name, &expr.schema_name, client);
- client
+ let _ = client
.drop_table(expr.clone())
.await
.context(RequestDatanodeSnafu)?;
@@ -349,7 +349,7 @@ impl DistInstance {
let client = self.datanode_clients.get_client(&datanode).await;
let client = Database::new(&expr.catalog_name, &expr.schema_name, client);
- client
+ let _ = client
.flush_table(expr.clone())
.await
.context(RequestDatanodeSnafu)?;
@@ -378,7 +378,7 @@ impl DistInstance {
}
Statement::CreateExternalTable(stmt) => {
let create_expr = &mut expr_factory::create_external_expr(stmt, query_ctx).await?;
- self.create_table(create_expr, None).await?;
+ let _ = self.create_table(create_expr, None).await?;
Ok(Output::AffectedRows(0))
}
Statement::Alter(alter_table) => {
@@ -545,7 +545,7 @@ impl DistInstance {
let mut context = AlterContext::with_capacity(1);
- context.insert(expr);
+ let _ = context.insert(expr);
table.alter(context, &request).await.context(TableSnafu)?;
@@ -730,7 +730,7 @@ fn create_table_info(create_table: &CreateTableExpr) -> Result<RawTableInfo> {
let schema = schema.with_time_index(column.name == create_table.time_index);
column_schemas.push(schema);
- column_name_to_index_map.insert(column.name.clone(), idx);
+ let _ = column_name_to_index_map.insert(column.name.clone(), idx);
}
let timestamp_index = column_name_to_index_map
diff --git a/src/frontend/src/instance/distributed/inserter.rs b/src/frontend/src/instance/distributed/inserter.rs
index 896247193e30..87761e3ebed8 100644
--- a/src/frontend/src/instance/distributed/inserter.rs
+++ b/src/frontend/src/instance/distributed/inserter.rs
@@ -240,11 +240,12 @@ mod tests {
ColumnSchema::new("a", ConcreteDataType::int32_datatype(), true),
]));
- let mut builder = TableMetaBuilder::default();
- builder.schema(schema);
- builder.primary_key_indices(vec![]);
- builder.next_column_id(1);
- let table_meta = builder.build().unwrap();
+ let table_meta = TableMetaBuilder::default()
+ .schema(schema)
+ .primary_key_indices(vec![])
+ .next_column_id(1)
+ .build()
+ .unwrap();
let table_info = TableInfoBuilder::new(table_name, table_meta)
.build()
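
The `TableMetaBuilder` hunks here, and the similar ones in table/delete.rs and table/insert.rs below, collapse stepwise builder mutation into one chained expression ending in `build()`. A self-contained sketch with a hypothetical `MetaBuilder`, not the real `TableMetaBuilder`:

#[derive(Debug)]
struct Meta {
    schema: String,
    next_column_id: u32,
}

#[derive(Default)]
struct MetaBuilder {
    schema: Option<String>,
    next_column_id: Option<u32>,
}

impl MetaBuilder {
    fn schema(&mut self, schema: &str) -> &mut Self {
        self.schema = Some(schema.to_string());
        self
    }

    fn next_column_id(&mut self, id: u32) -> &mut Self {
        self.next_column_id = Some(id);
        self
    }

    fn build(&self) -> Result<Meta, String> {
        Ok(Meta {
            schema: self.schema.clone().ok_or("schema is required")?,
            next_column_id: self.next_column_id.ok_or("next_column_id is required")?,
        })
    }
}

fn main() {
    // Chained form: every `&mut Self` return feeds the next call, so nothing
    // is left unused before `build()`.
    let meta = MetaBuilder::default()
        .schema("ts TIMESTAMP TIME INDEX, host STRING")
        .next_column_id(2)
        .build()
        .unwrap();
    println!("{meta:?}");
}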
diff --git a/src/frontend/src/instance/influxdb.rs b/src/frontend/src/instance/influxdb.rs
index 7256f587f2f9..711e68fcab9e 100644
--- a/src/frontend/src/instance/influxdb.rs
+++ b/src/frontend/src/instance/influxdb.rs
@@ -29,7 +29,8 @@ impl InfluxdbLineProtocolHandler for Instance {
ctx: QueryContextRef,
) -> servers::error::Result<()> {
let requests = request.try_into()?;
- self.handle_inserts(requests, ctx)
+ let _ = self
+ .handle_inserts(requests, ctx)
.await
.map_err(BoxedError::new)
.context(servers::error::ExecuteGrpcQuerySnafu)?;
diff --git a/src/frontend/src/instance/opentsdb.rs b/src/frontend/src/instance/opentsdb.rs
index 7e388408d978..9eea4c32d4c9 100644
--- a/src/frontend/src/instance/opentsdb.rs
+++ b/src/frontend/src/instance/opentsdb.rs
@@ -29,7 +29,8 @@ impl OpentsdbProtocolHandler for Instance {
let requests = InsertRequests {
inserts: vec![data_point.as_grpc_insert()],
};
- self.handle_inserts(requests, ctx)
+ let _ = self
+ .handle_inserts(requests, ctx)
.await
.map_err(BoxedError::new)
.with_context(|_| server_error::ExecuteQuerySnafu {
diff --git a/src/frontend/src/instance/prometheus.rs b/src/frontend/src/instance/prometheus.rs
index 3d85757a0427..fe30ec3c9385 100644
--- a/src/frontend/src/instance/prometheus.rs
+++ b/src/frontend/src/instance/prometheus.rs
@@ -147,7 +147,8 @@ impl Instance {
impl PrometheusProtocolHandler for Instance {
async fn write(&self, request: WriteRequest, ctx: QueryContextRef) -> ServerResult<()> {
let (requests, samples) = prometheus::to_grpc_insert_requests(request)?;
- self.handle_inserts(requests, ctx)
+ let _ = self
+ .handle_inserts(requests, ctx)
.await
.map_err(BoxedError::new)
.context(error::ExecuteGrpcQuerySnafu)?;
diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs
index 9ab3a45625bd..e817973e6395 100644
--- a/src/frontend/src/server.rs
+++ b/src/frontend/src/server.rs
@@ -153,36 +153,36 @@ impl Services {
let http_addr = parse_addr(&http_options.addr)?;
let mut http_server_builder = HttpServerBuilder::new(http_options.clone());
- http_server_builder
+ let _ = http_server_builder
.with_sql_handler(ServerSqlQueryHandlerAdaptor::arc(instance.clone()))
.with_grpc_handler(ServerGrpcQueryHandlerAdaptor::arc(instance.clone()));
if let Some(user_provider) = user_provider.clone() {
- http_server_builder.with_user_provider(user_provider);
+ let _ = http_server_builder.with_user_provider(user_provider);
}
if set_opentsdb_handler {
- http_server_builder.with_opentsdb_handler(instance.clone());
+ let _ = http_server_builder.with_opentsdb_handler(instance.clone());
}
if matches!(
opts.influxdb_options,
Some(InfluxdbOptions { enable: true })
) {
- http_server_builder.with_influxdb_handler(instance.clone());
+ let _ = http_server_builder.with_influxdb_handler(instance.clone());
}
if matches!(
opts.prometheus_options,
Some(PrometheusOptions { enable: true })
) {
- http_server_builder.with_prom_handler(instance.clone());
+ let _ = http_server_builder.with_prom_handler(instance.clone());
}
- http_server_builder.with_metrics_handler(MetricsHandler);
- http_server_builder.with_script_handler(instance.clone());
- http_server_builder.with_configurator(plugins.get::<ConfiguratorRef>());
-
- let http_server = http_server_builder.build();
+ let http_server = http_server_builder
+ .with_metrics_handler(MetricsHandler)
+ .with_script_handler(instance.clone())
+ .with_configurator(plugins.get::<ConfiguratorRef>())
+ .build();
result.push((Box::new(http_server), http_addr));
}
diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs
index 384c7dab6e53..cf7b5596decc 100644
--- a/src/frontend/src/table.rs
+++ b/src/frontend/src/table.rs
@@ -980,7 +980,8 @@ pub(crate) mod test {
impl Collect for MockCollector {
fn on_write(&self, record: WriteRecord) {
- self.write_sum
+ let _ = self
+ .write_sum
.fetch_add(record.byte_count, Ordering::Relaxed);
}
diff --git a/src/frontend/src/table/delete.rs b/src/frontend/src/table/delete.rs
index b620f7f3e5de..b5a6bd4c9336 100644
--- a/src/frontend/src/table/delete.rs
+++ b/src/frontend/src/table/delete.rs
@@ -85,12 +85,13 @@ mod tests {
ColumnSchema::new("id", ConcreteDataType::int32_datatype(), false),
]);
- let mut builder = TableMetaBuilder::default();
- builder.schema(Arc::new(schema));
- builder.primary_key_indices(vec![]);
- builder.next_column_id(2);
+ let table_meta = TableMetaBuilder::default()
+ .schema(Arc::new(schema))
+ .primary_key_indices(vec![])
+ .next_column_id(2)
+ .build()
+ .unwrap();
- let table_meta = builder.build().unwrap();
let table_name = TableName {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
diff --git a/src/frontend/src/table/insert.rs b/src/frontend/src/table/insert.rs
index 4ee1ab84332f..902f2f198399 100644
--- a/src/frontend/src/table/insert.rs
+++ b/src/frontend/src/table/insert.rs
@@ -139,11 +139,12 @@ mod tests {
ColumnSchema::new("v", ConcreteDataType::string_datatype(), true),
]));
- let mut builder = TableMetaBuilder::default();
- builder.schema(schema);
- builder.primary_key_indices(vec![1]);
- builder.next_column_id(3);
- let table_meta = builder.build().unwrap();
+ let table_meta = TableMetaBuilder::default()
+ .schema(schema)
+ .primary_key_indices(vec![1])
+ .next_column_id(3)
+ .build()
+ .unwrap();
let column = vector_to_grpc_column(
&table_meta,
@@ -198,12 +199,13 @@ mod tests {
ColumnSchema::new("host", ConcreteDataType::string_datatype(), false),
]);
- let mut builder = TableMetaBuilder::default();
- builder.schema(Arc::new(schema));
- builder.primary_key_indices(vec![]);
- builder.next_column_id(3);
+ let table_meta = TableMetaBuilder::default()
+ .schema(Arc::new(schema))
+ .primary_key_indices(vec![])
+ .next_column_id(3)
+ .build()
+ .unwrap();
- let table_meta = builder.build().unwrap();
let insert_request = mock_insert_request();
let request = to_grpc_insert_request(&table_meta, 12, insert_request).unwrap();
@@ -211,19 +213,19 @@ mod tests {
}
fn mock_insert_request() -> InsertRequest {
- let mut columns_values = HashMap::with_capacity(4);
-
let mut builder = StringVectorBuilder::with_capacity(3);
builder.push(Some("host1"));
builder.push(None);
builder.push(Some("host3"));
- columns_values.insert("host".to_string(), builder.to_vector());
+ let host = builder.to_vector();
let mut builder = Int16VectorBuilder::with_capacity(3);
builder.push(Some(1_i16));
builder.push(Some(2_i16));
builder.push(Some(3_i16));
- columns_values.insert("id".to_string(), builder.to_vector());
+ let id = builder.to_vector();
+
+ let columns_values = HashMap::from([("host".to_string(), host), ("id".to_string(), id)]);
InsertRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
diff --git a/src/log-store/src/noop.rs b/src/log-store/src/noop.rs
index 09916e005528..edec091077eb 100644
--- a/src/log-store/src/noop.rs
+++ b/src/log-store/src/noop.rs
@@ -130,11 +130,11 @@ mod tests {
async fn test_noop_logstore() {
let store = NoopLogStore::default();
let e = store.entry("".as_bytes(), 1, NamespaceImpl::default());
- store.append(e.clone()).await.unwrap();
- store
+ assert!(store.append(e.clone()).await.is_ok());
+ assert!(store
.append_batch(&NamespaceImpl::default(), vec![e])
.await
- .unwrap();
+ .is_ok());
store
.create_namespace(&NamespaceImpl::default())
.await
diff --git a/src/log-store/src/raft_engine/log_store.rs b/src/log-store/src/raft_engine/log_store.rs
index 1c952680dff9..1b9e4cbed825 100644
--- a/src/log-store/src/raft_engine/log_store.rs
+++ b/src/log-store/src/raft_engine/log_store.rs
@@ -157,7 +157,8 @@ impl LogStore for RaftEngineLogStore {
);
}
- self.engine
+ let _ = self
+ .engine
.write(&mut batch, self.config.sync_write)
.context(RaftEngineSnafu)?;
Ok(AppendResponse { entry_id })
@@ -203,7 +204,8 @@ impl LogStore for RaftEngineLogStore {
);
}
- self.engine
+ let _ = self
+ .engine
.write(&mut batch, self.config.sync_write)
.context(RaftEngineSnafu)?;
Ok(entry_ids)
@@ -231,7 +233,7 @@ impl LogStore for RaftEngineLogStore {
let max_batch_size = self.config.read_batch_size;
let (tx, mut rx) = tokio::sync::mpsc::channel(max_batch_size);
let ns = ns.clone();
- common_runtime::spawn_read(async move {
+ let _handle = common_runtime::spawn_read(async move {
while start_index <= last_index {
let mut vec = Vec::with_capacity(max_batch_size);
match engine
@@ -284,7 +286,8 @@ impl LogStore for RaftEngineLogStore {
batch
.put_message::<Namespace>(SYSTEM_NAMESPACE, key, ns)
.context(RaftEngineSnafu)?;
- self.engine
+ let _ = self
+ .engine
.write(&mut batch, true)
.context(RaftEngineSnafu)?;
Ok(())
@@ -299,7 +302,8 @@ impl LogStore for RaftEngineLogStore {
let key = format!("{}{}", NAMESPACE_PREFIX, ns.id).as_bytes().to_vec();
let mut batch = LogBatch::with_capacity(1);
batch.delete(SYSTEM_NAMESPACE, key);
- self.engine
+ let _ = self
+ .engine
.write(&mut batch, true)
.context(RaftEngineSnafu)?;
Ok(())
@@ -471,10 +475,10 @@ mod tests {
})
.await
.unwrap();
- logstore
+ assert!(logstore
.append(Entry::create(1, 1, "1".as_bytes().to_vec()))
.await
- .unwrap();
+ .is_ok());
let entries = logstore
.read(&Namespace::with_id(1), 1)
.await
@@ -533,7 +537,7 @@ mod tests {
let namespace = Namespace::with_id(42);
for id in 0..4096 {
let entry = Entry::create(id, namespace.id(), [b'x'; 4096].to_vec());
- logstore.append(entry).await.unwrap();
+ assert!(logstore.append(entry).await.is_ok());
}
let before_purge = wal_dir_usage(dir.path().to_str().unwrap()).await;
@@ -565,7 +569,7 @@ mod tests {
let namespace = Namespace::with_id(42);
for id in 0..1024 {
let entry = Entry::create(id, namespace.id(), [b'x'; 4096].to_vec());
- logstore.append(entry).await.unwrap();
+ assert!(logstore.append(entry).await.is_ok());
}
logstore.obsolete(namespace.clone(), 100).await.unwrap();
diff --git a/src/meta-client/examples/meta_client.rs b/src/meta-client/examples/meta_client.rs
index 9e3f7e1a23c1..88958eaaf63d 100644
--- a/src/meta-client/examples/meta_client.rs
+++ b/src/meta-client/examples/meta_client.rs
@@ -58,7 +58,7 @@ async fn run() {
let (sender, mut receiver) = meta_client.heartbeat().await.unwrap();
// send heartbeats
- tokio::spawn(async move {
+ let _handle = tokio::spawn(async move {
for _ in 0..5 {
let req = HeartbeatRequest {
peer: Some(Peer {
@@ -72,7 +72,7 @@ async fn run() {
tokio::time::sleep(Duration::from_secs(10)).await;
});
- tokio::spawn(async move {
+ let _handle = tokio::spawn(async move {
while let Some(res) = receiver.message().await.unwrap() {
event!(Level::TRACE, "heartbeat response: {:#?}", res);
}
diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs
index 5842b431c48e..93b84c8846f9 100644
--- a/src/meta-client/src/client.rs
+++ b/src/meta-client/src/client.rs
@@ -330,7 +330,7 @@ impl MetaClient {
}
pub async fn unlock(&self, req: UnlockRequest) -> Result<()> {
- self.lock_client()?.unlock(req.into()).await?;
+ let _ = self.lock_client()?.unlock(req.into()).await?;
Ok(())
}
@@ -577,7 +577,7 @@ mod tests {
let tc = new_client("test_heartbeat").await;
let (sender, mut receiver) = tc.client.heartbeat().await.unwrap();
// send heartbeats
- tokio::spawn(async move {
+ let _handle = tokio::spawn(async move {
for _ in 0..5 {
let req = HeartbeatRequest {
peer: Some(Peer {
@@ -590,7 +590,7 @@ mod tests {
}
});
- tokio::spawn(async move {
+ let _handle = tokio::spawn(async move {
while let Some(res) = receiver.message().await.unwrap() {
assert_eq!(1000, res.header.unwrap().cluster_id);
}
diff --git a/src/meta-client/src/client/heartbeat.rs b/src/meta-client/src/client/heartbeat.rs
index 1d8ff1bbbb1f..1b563bc691b8 100644
--- a/src/meta-client/src/client/heartbeat.rs
+++ b/src/meta-client/src/client/heartbeat.rs
@@ -292,7 +292,7 @@ mod test {
async fn test_heartbeat_stream() {
let (sender, mut receiver) = mpsc::channel::<HeartbeatRequest>(100);
let sender = HeartbeatSender::new((8, 8), Role::Datanode, sender);
- tokio::spawn(async move {
+ let _handle = tokio::spawn(async move {
for _ in 0..10 {
sender.send(HeartbeatRequest::default()).await.unwrap();
}
diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs
index 6b56b822a9e3..ea55b06e77d3 100644
--- a/src/meta-srv/src/bootstrap.rs
+++ b/src/meta-srv/src/bootstrap.rs
@@ -132,7 +132,7 @@ pub async fn bootstrap_meta_srv_with_router(
router
.serve_with_incoming_shutdown(listener, async {
- signal.recv().await;
+ let _ = signal.recv().await;
})
.await
.context(error::StartGrpcSnafu)?;
diff --git a/src/meta-srv/src/cluster.rs b/src/meta-srv/src/cluster.rs
index 1c63ad5cef23..66e84d06a781 100644
--- a/src/meta-srv/src/cluster.rs
+++ b/src/meta-srv/src/cluster.rs
@@ -201,7 +201,7 @@ impl MetaPeerClient {
fn to_stat_kv_map(kvs: Vec<KeyValue>) -> Result<HashMap<StatKey, StatValue>> {
let mut map = HashMap::with_capacity(kvs.len());
for kv in kvs {
- map.insert(kv.key.try_into()?, kv.value.try_into()?);
+ let _ = map.insert(kv.key.try_into()?, kv.value.try_into()?);
}
Ok(map)
}
diff --git a/src/meta-srv/src/election/etcd.rs b/src/meta-srv/src/election/etcd.rs
index a14e90350bf1..96db650853e5 100644
--- a/src/meta-srv/src/election/etcd.rs
+++ b/src/meta-srv/src/election/etcd.rs
@@ -59,7 +59,7 @@ impl EtcdElection {
let leader_ident = leader_value.clone();
let (tx, mut rx) = broadcast::channel(100);
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
loop {
match rx.recv().await {
Ok(msg) => match msg {
@@ -142,7 +142,7 @@ impl Election for EtcdElection {
let mut keep_alive_interval =
tokio::time::interval(Duration::from_secs(KEEP_ALIVE_PERIOD_SECS));
loop {
- keep_alive_interval.tick().await;
+ let _ = keep_alive_interval.tick().await;
keeper.keep_alive().await.context(error::EtcdFailedSnafu)?;
if let Some(res) = receiver.message().await.context(error::EtcdFailedSnafu)? {
diff --git a/src/meta-srv/src/handler.rs b/src/meta-srv/src/handler.rs
index b2727a8ef226..f675bc5fedf0 100644
--- a/src/meta-srv/src/handler.rs
+++ b/src/meta-srv/src/handler.rs
@@ -286,7 +286,7 @@ impl HeartbeatMailbox {
let mailbox = Arc::new(Self::new(pushers, sequence));
let timeout_checker = mailbox.clone();
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
timeout_checker.check_timeout_bg(10).await;
});
@@ -307,7 +307,7 @@ impl HeartbeatMailbox {
let mut interval = tokio::time::interval(Duration::from_millis(interval_millis));
loop {
- interval.tick().await;
+ let _ = interval.tick().await;
if self.timeouts.is_empty() {
self.timeout_notify.notified().await;
@@ -363,10 +363,10 @@ impl Mailbox for HeartbeatMailbox {
debug!("Sending mailbox message {msg:?} to {pusher_id}");
let (tx, rx) = oneshot::channel();
- self.senders.insert(message_id, tx);
+ let _ = self.senders.insert(message_id, tx);
let deadline =
Duration::from_millis(common_time::util::current_time_millis() as u64) + timeout;
- self.timeouts.insert(message_id, deadline);
+ let _ = self.timeouts.insert(message_id, deadline);
self.timeout_notify.notify_one();
self.pushers.push(&pusher_id, msg).await?;
@@ -381,7 +381,7 @@ impl Mailbox for HeartbeatMailbox {
async fn on_recv(&self, id: MessageId, maybe_msg: Result<MailboxMessage>) -> Result<()> {
debug!("Received mailbox message {maybe_msg:?}");
- self.timeouts.remove(&id);
+ let _ = self.timeouts.remove(&id);
if let Some((_, tx)) = self.senders.remove(&id) {
tx.send(maybe_msg)
diff --git a/src/meta-srv/src/handler/failure_handler/runner.rs b/src/meta-srv/src/handler/failure_handler/runner.rs
index c292c118b9bd..ec671a557e45 100644
--- a/src/meta-srv/src/handler/failure_handler/runner.rs
+++ b/src/meta-srv/src/handler/failure_handler/runner.rs
@@ -296,7 +296,7 @@ mod tests {
datanode_id: 2,
region_number: 1,
};
- container.get_failure_detector(ident.clone());
+ let _ = container.get_failure_detector(ident.clone());
let region_failover_manager = create_region_failover_manager();
let mut runner = FailureDetectRunner::new(None, region_failover_manager);
diff --git a/src/meta-srv/src/handler/keep_lease_handler.rs b/src/meta-srv/src/handler/keep_lease_handler.rs
index 24fa22134b60..cc8a67500dd3 100644
--- a/src/meta-srv/src/handler/keep_lease_handler.rs
+++ b/src/meta-srv/src/handler/keep_lease_handler.rs
@@ -30,7 +30,7 @@ pub struct KeepLeaseHandler {
impl KeepLeaseHandler {
pub fn new(kv_store: KvStoreRef) -> Self {
let (tx, mut rx) = mpsc::channel(1024);
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
while let Some(kv) = rx.recv().await {
let mut kvs = vec![kv];
diff --git a/src/meta-srv/src/handler/persist_stats_handler.rs b/src/meta-srv/src/handler/persist_stats_handler.rs
index 2b1ad61e110b..d182daa0d4a8 100644
--- a/src/meta-srv/src/handler/persist_stats_handler.rs
+++ b/src/meta-srv/src/handler/persist_stats_handler.rs
@@ -122,7 +122,7 @@ impl HeartbeatHandler for PersistStatsHandler {
..Default::default()
};
- ctx.in_memory.put(put).await?;
+ let _ = ctx.in_memory.put(put).await?;
Ok(())
}
diff --git a/src/meta-srv/src/handler/region_lease_handler.rs b/src/meta-srv/src/handler/region_lease_handler.rs
index 6eeb0ef0bb2e..586d124c0303 100644
--- a/src/meta-srv/src/handler/region_lease_handler.rs
+++ b/src/meta-srv/src/handler/region_lease_handler.rs
@@ -174,7 +174,7 @@ mod test {
table_id: 1,
engine: "mito".to_string(),
};
- region_failover_manager
+ let _ = region_failover_manager
.running_procedures()
.write()
.unwrap()
diff --git a/src/meta-srv/src/lease.rs b/src/meta-srv/src/lease.rs
index b951fd9f87dc..89b216dd7368 100644
--- a/src/meta-srv/src/lease.rs
+++ b/src/meta-srv/src/lease.rs
@@ -60,7 +60,7 @@ where
if !predicate(&lease_key, &lease_value) {
continue;
}
- lease_kvs.insert(lease_key, lease_value);
+ let _ = lease_kvs.insert(lease_key, lease_value);
}
Ok(lease_kvs)
diff --git a/src/meta-srv/src/lock.rs b/src/meta-srv/src/lock.rs
index 8defc86b4e9a..ded2cd348362 100644
--- a/src/meta-srv/src/lock.rs
+++ b/src/meta-srv/src/lock.rs
@@ -90,7 +90,7 @@ impl Drop for DistLockGuard<'_> {
if let Some(key) = self.key.take() {
let lock = self.lock.clone();
let name = self.name.clone();
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
if let Err(e) = lock.unlock(key).await {
error!(e; "Failed to unlock '{}'", String::from_utf8_lossy(&name));
}
diff --git a/src/meta-srv/src/lock/memory.rs b/src/meta-srv/src/lock/memory.rs
index 72637700f144..d312700d129c 100644
--- a/src/meta-srv/src/lock/memory.rs
+++ b/src/meta-srv/src/lock/memory.rs
@@ -38,14 +38,14 @@ impl DistLock for MemLock {
let guard = mutex.lock_owned().await;
- self.guards.insert(key.clone(), guard);
+ let _ = self.guards.insert(key.clone(), guard);
Ok(key)
}
async fn unlock(&self, key: Vec<u8>) -> Result<()> {
// drop the guard, so that the mutex can be unlocked,
// effectively make the `mutex.lock_owned` in `lock` method to proceed
- self.guards.remove(&key);
+ let _ = self.guards.remove(&key);
Ok(())
}
}
@@ -85,10 +85,10 @@ mod tests {
// every key counter will be added by 1 for 10 times
for i in 0..100 {
let key = &keys[i % keys.len()];
- lock_clone
+ assert!(lock_clone
.lock(key.clone(), Opts { expire_secs: None })
.await
- .unwrap();
+ .is_ok());
// Intentionally create a critical section:
// if our MemLock is flawed, the resulting counter is wrong.
@@ -105,7 +105,7 @@ mod tests {
})
})
.collect::<Vec<_>>();
- futures::future::join_all(tasks).await;
+ let _ = futures::future::join_all(tasks).await;
assert!(counters.values().all(|x| x.load(Ordering::Relaxed) == 1000));
}
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 6dbda2729040..f231a0ad9fec 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -145,7 +145,7 @@ impl MetaSrv {
if let Some(election) = self.election() {
let procedure_manager = self.procedure_manager.clone();
let mut rx = election.subscribe_leader_change();
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
loop {
match rx.recv().await {
Ok(msg) => match msg {
@@ -171,7 +171,7 @@ impl MetaSrv {
let election = election.clone();
let started = self.started.clone();
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
while started.load(Ordering::Relaxed) {
let res = election.campaign().await;
if let Err(e) = res {
diff --git a/src/meta-srv/src/mocks.rs b/src/meta-srv/src/mocks.rs
index 2b3ee8012ea9..d65a6b47475f 100644
--- a/src/meta-srv/src/mocks.rs
+++ b/src/meta-srv/src/mocks.rs
@@ -77,7 +77,7 @@ pub async fn mock(
let (client, server) = tokio::io::duplex(1024);
let service = meta_srv.clone();
- tokio::spawn(async move {
+ let _handle = tokio::spawn(async move {
tonic::transport::Server::builder()
.add_service(HeartbeatServer::new(service.clone()))
.add_service(RouterServer::new(service.clone()))
diff --git a/src/meta-srv/src/procedure/region_failover.rs b/src/meta-srv/src/procedure/region_failover.rs
index 828f69eb5469..0dcb213aa586 100644
--- a/src/meta-srv/src/procedure/region_failover.rs
+++ b/src/meta-srv/src/procedure/region_failover.rs
@@ -84,7 +84,7 @@ struct FailoverProcedureGuard {
impl Drop for FailoverProcedureGuard {
fn drop(&mut self) {
- self.running_procedures.write().unwrap().remove(&self.key);
+ let _ = self.running_procedures.write().unwrap().remove(&self.key);
}
}
@@ -177,7 +177,7 @@ impl RegionFailoverManager {
let procedure_manager = self.procedure_manager.clone();
let failed_region = failed_region.clone();
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
let _ = guard;
let watcher = &mut match procedure_manager.submit(procedure_with_id).await {
@@ -474,7 +474,7 @@ mod tests {
let (_, table_global_value) =
table_routes::tests::prepare_table_global_value(&kv_store, table).await;
- table_routes::tests::prepare_table_route_value(&kv_store, table).await;
+ let _ = table_routes::tests::prepare_table_route_value(&kv_store, table).await;
let pushers = Pushers::default();
let mut heartbeat_receivers = HashMap::with_capacity(3);
@@ -485,7 +485,7 @@ mod tests {
let pusher = Pusher::new(tx, &RequestHeader::default());
let _ = pushers.insert(pusher_id, pusher).await;
- heartbeat_receivers.insert(datanode_id, rx);
+ let _ = heartbeat_receivers.insert(datanode_id, rx);
}
let mailbox_sequence =
@@ -541,7 +541,7 @@ mod tests {
.unwrap();
let mailbox_clone = env.context.mailbox.clone();
let failed_region_clone = failed_region.clone();
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
let resp = failed_datanode.recv().await.unwrap().unwrap();
let received = &resp.mailbox_message.unwrap();
assert_eq!(
@@ -580,7 +580,7 @@ mod tests {
let mailbox_clone = env.context.mailbox.clone();
let failed_region_clone = failed_region.clone();
let candidate_tx = candidate_tx.clone();
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
let resp = recv.recv().await.unwrap().unwrap();
let received = &resp.mailbox_message.unwrap();
assert_eq!(
diff --git a/src/meta-srv/src/procedure/region_failover/invalidate_cache.rs b/src/meta-srv/src/procedure/region_failover/invalidate_cache.rs
index e43ec05cb106..0d46e206bf25 100644
--- a/src/meta-srv/src/procedure/region_failover/invalidate_cache.rs
+++ b/src/meta-srv/src/procedure/region_failover/invalidate_cache.rs
@@ -105,7 +105,7 @@ mod tests {
let pusher = Pusher::new(tx, &RequestHeader::default());
let _ = pushers.insert(pusher_id, pusher).await;
- heartbeat_receivers.insert(frontend_id, rx);
+ let _ = heartbeat_receivers.insert(frontend_id, rx);
}
let state = InvalidateCache;
diff --git a/src/meta-srv/src/procedure/region_failover/update_metadata.rs b/src/meta-srv/src/procedure/region_failover/update_metadata.rs
index e8350f6c65b9..eac9c19800c6 100644
--- a/src/meta-srv/src/procedure/region_failover/update_metadata.rs
+++ b/src/meta-srv/src/procedure/region_failover/update_metadata.rs
@@ -83,7 +83,7 @@ impl UpdateRegionMetadata {
region_numbers.retain(|x| *x != failed_region.region_number);
if !region_numbers.is_empty() {
- value
+ let _ = value
.regions_id_map
.insert(failed_region.datanode_id, region_numbers);
}
@@ -467,7 +467,7 @@ mod tests {
let table_name = failed_region_1.table_ident.table.clone();
let table_id = failed_region_1.table_ident.table_id as u64;
- futures::future::join_all(vec![
+ let _ = futures::future::join_all(vec![
tokio::spawn(async move {
let state = UpdateRegionMetadata::new(Peer::new(2, ""));
state
diff --git a/src/meta-srv/src/service/admin.rs b/src/meta-srv/src/service/admin.rs
index 73008f697505..43adfd0fe731 100644
--- a/src/meta-srv/src/service/admin.rs
+++ b/src/meta-srv/src/service/admin.rs
@@ -170,7 +170,7 @@ impl Router {
pub fn route(mut self, path: &str, handler: impl HttpHandler + 'static) -> Self {
check_path(path);
- self.handlers.insert(path.to_owned(), Box::new(handler));
+ let _ = self.handlers.insert(path.to_owned(), Box::new(handler));
self
}
diff --git a/src/meta-srv/src/service/admin/meta.rs b/src/meta-srv/src/service/admin/meta.rs
index 8d40fe49ba49..4e9923d222bd 100644
--- a/src/meta-srv/src/service/admin/meta.rs
+++ b/src/meta-srv/src/service/admin/meta.rs
@@ -180,27 +180,27 @@ mod tests {
let catalog = CatalogKey {
catalog_name: catalog_name.to_string(),
};
- in_mem
+ assert!(in_mem
.put(PutRequest {
key: catalog.to_string().as_bytes().to_vec(),
value: "".as_bytes().to_vec(),
..Default::default()
})
.await
- .unwrap();
+ .is_ok());
let schema = SchemaKey {
catalog_name: catalog_name.to_string(),
schema_name: schema_name.to_string(),
};
- in_mem
+ assert!(in_mem
.put(PutRequest {
key: schema.to_string().as_bytes().to_vec(),
value: "".as_bytes().to_vec(),
..Default::default()
})
.await
- .unwrap();
+ .is_ok());
let table1 = TableGlobalKey {
catalog_name: catalog_name.to_string(),
@@ -212,22 +212,22 @@ mod tests {
schema_name: schema_name.to_string(),
table_name: "test_table1".to_string(),
};
- in_mem
+ assert!(in_mem
.put(PutRequest {
key: table1.to_string().as_bytes().to_vec(),
value: "".as_bytes().to_vec(),
..Default::default()
})
.await
- .unwrap();
- in_mem
+ .is_ok());
+ assert!(in_mem
.put(PutRequest {
key: table2.to_string().as_bytes().to_vec(),
value: "".as_bytes().to_vec(),
..Default::default()
})
.await
- .unwrap();
+ .is_ok());
let catalog_key = get_keys_by_prefix(build_catalog_prefix(), &in_mem)
.await
diff --git a/src/meta-srv/src/service/heartbeat.rs b/src/meta-srv/src/service/heartbeat.rs
index 0da329fd14c5..e3f7bbd21df9 100644
--- a/src/meta-srv/src/service/heartbeat.rs
+++ b/src/meta-srv/src/service/heartbeat.rs
@@ -45,7 +45,7 @@ impl heartbeat_server::Heartbeat for MetaSrv {
let (tx, rx) = mpsc::channel(128);
let handler_group = self.handler_group();
let ctx = self.new_ctx();
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
let mut pusher_key = None;
while let Some(msg) = in_stream.next().await {
let mut is_not_leader = false;
diff --git a/src/meta-srv/src/service/store/ext.rs b/src/meta-srv/src/service/store/ext.rs
index 2cbe2c18cac3..033ff6da6b63 100644
--- a/src/meta-srv/src/service/store/ext.rs
+++ b/src/meta-srv/src/service/store/ext.rs
@@ -157,22 +157,22 @@ mod tests {
}
async fn put_stats_to_store(store: &mut KvStoreRef) {
- store
+ assert!(store
.put(PutRequest {
key: "test_key1".as_bytes().to_vec(),
value: "test_val1".as_bytes().to_vec(),
..Default::default()
})
.await
- .unwrap();
+ .is_ok());
- store
+ assert!(store
.put(PutRequest {
key: "test_key2".as_bytes().to_vec(),
value: "test_val2".as_bytes().to_vec(),
..Default::default()
})
.await
- .unwrap();
+ .is_ok());
}
}
diff --git a/src/meta-srv/src/service/store/memory.rs b/src/meta-srv/src/service/store/memory.rs
index b4cc67ad8819..0d9e58f7762b 100644
--- a/src/meta-srv/src/service/store/memory.rs
+++ b/src/meta-srv/src/service/store/memory.rs
@@ -178,7 +178,7 @@ impl KvStore for MemStore {
.collect()
} else {
for kv in kvs.into_iter() {
- memory.insert(kv.key, kv.value);
+ let _ = memory.insert(kv.key, kv.value);
}
vec![]
};
@@ -208,7 +208,7 @@ impl KvStore for MemStore {
.collect()
} else {
for key in keys.into_iter() {
- memory.remove(&key);
+ let _ = memory.remove(&key);
}
vec![]
};
@@ -236,7 +236,7 @@ impl KvStore for MemStore {
Entry::Vacant(e) => {
let success = expect.is_empty();
if success {
- e.insert(value);
+ let _ = e.insert(value);
}
(success, None)
}
@@ -245,7 +245,7 @@ impl KvStore for MemStore {
let prev_val = e.get().clone();
let success = prev_val == expect;
if success {
- e.insert(value);
+ let _ = e.insert(value);
}
(success, Some((key, prev_val)))
}
@@ -320,7 +320,7 @@ impl KvStore for MemStore {
let kv = match memory.remove(&from_key) {
Some(v) => {
- memory.insert(to_key, v.clone());
+ let _ = memory.insert(to_key, v.clone());
Some((from_key, v))
}
None => memory.get(&to_key).map(|v| (to_key, v.clone())),
@@ -419,22 +419,22 @@ mod tests {
let kv_store = MemStore::new();
let kvs = mock_kvs();
- kv_store
+ assert!(kv_store
.batch_put(BatchPutRequest {
kvs,
..Default::default()
})
.await
- .unwrap();
+ .is_ok());
- kv_store
+ assert!(kv_store
.put(PutRequest {
key: b"key11".to_vec(),
value: b"val11".to_vec(),
..Default::default()
})
.await
- .unwrap();
+ .is_ok());
kv_store
}
@@ -614,7 +614,7 @@ mod tests {
};
let resp = kv_store_clone.compare_and_put(req).await.unwrap();
if resp.success {
- success_clone.fetch_add(1, Ordering::SeqCst);
+ let _ = success_clone.fetch_add(1, Ordering::SeqCst);
}
});
joins.push(join);
diff --git a/src/meta-srv/src/service/store/txn.rs b/src/meta-srv/src/service/store/txn.rs
index aaaf7b953acc..a636474f1e83 100644
--- a/src/meta-srv/src/service/store/txn.rs
+++ b/src/meta-srv/src/service/store/txn.rs
@@ -301,7 +301,7 @@ mod tests {
async fn test_txn_compare_equal() {
let kv_store = create_kv_store().await;
let key = vec![101u8];
- kv_store.delete(key.clone(), false).await.unwrap();
+ assert!(kv_store.delete(key.clone(), false).await.is_ok());
let txn = Txn::new()
.when(vec![Compare::with_not_exist_value(
@@ -332,7 +332,7 @@ mod tests {
async fn test_txn_compare_greater() {
let kv_store = create_kv_store().await;
let key = vec![102u8];
- kv_store.delete(key.clone(), false).await.unwrap();
+ assert!(kv_store.delete(key.clone(), false).await.is_ok());
let txn = Txn::new()
.when(vec![Compare::with_not_exist_value(
@@ -375,7 +375,7 @@ mod tests {
async fn test_txn_compare_less() {
let kv_store = create_kv_store().await;
let key = vec![103u8];
- kv_store.delete(vec![3], false).await.unwrap();
+ assert!(kv_store.delete(vec![3], false).await.is_ok());
let txn = Txn::new()
.when(vec![Compare::with_not_exist_value(
@@ -418,7 +418,7 @@ mod tests {
async fn test_txn_compare_not_equal() {
let kv_store = create_kv_store().await;
let key = vec![104u8];
- kv_store.delete(key.clone(), false).await.unwrap();
+ assert!(kv_store.delete(key.clone(), false).await.is_ok());
let txn = Txn::new()
.when(vec![Compare::with_not_exist_value(
diff --git a/src/meta-srv/src/table_routes.rs b/src/meta-srv/src/table_routes.rs
index 35e1a8a02b33..5b5dd260d045 100644
--- a/src/meta-srv/src/table_routes.rs
+++ b/src/meta-srv/src/table_routes.rs
@@ -54,12 +54,12 @@ pub(crate) async fn batch_get_table_global_value(
for kv in kvs {
let key = TableGlobalKey::try_from_raw_key(kv.key()).context(InvalidCatalogValueSnafu)?;
let value = TableGlobalValue::from_bytes(kv.value()).context(InvalidCatalogValueSnafu)?;
- result.insert(key, Some(value));
+ let _ = result.insert(key, Some(value));
}
for key in keys {
if !result.contains_key(key) {
- result.insert(key.clone(), None);
+ let _ = result.insert(key.clone(), None);
}
}
Ok(result)
diff --git a/src/mito/src/engine.rs b/src/mito/src/engine.rs
index d64ed4bb4ea5..1ef3a1cf5d31 100644
--- a/src/mito/src/engine.rs
+++ b/src/mito/src/engine.rs
@@ -445,7 +445,7 @@ impl<S: StorageEngine> MitoEngineInner<S> {
let region = self
.open_region(&engine_ctx, table_id, *region_number, &table_ref, &opts)
.await?;
- regions.insert(*region_number, region);
+ let _ = regions.insert(*region_number, region);
}
let table = Arc::new(MitoTable::new(table_info, regions, manifest));
@@ -561,7 +561,7 @@ impl<S: StorageEngine> MitoEngineInner<S> {
let table = self.recover_table(ctx, request.clone()).await?;
if let Some(table) = table {
// already locked
- self.tables.insert(request.table_id, table.clone());
+ let _ = self.tables.insert(request.table_id, table.clone());
Some(table as _)
} else {
@@ -639,7 +639,7 @@ impl<S: StorageEngine> MitoEngineInner<S> {
}
async fn close(&self) -> TableResult<()> {
- futures::future::try_join_all(
+ let _ = futures::future::try_join_all(
self.tables
.iter()
.map(|item| self.close_table_inner(item.value().clone(), None, false)),
@@ -694,7 +694,7 @@ impl<S: StorageEngine> MitoEngineInner<S> {
}
if table.is_releasable() {
- self.tables.remove(&table_id);
+ let _ = self.tables.remove(&table_id);
logging::info!(
"Mito engine closed table: {} in schema: {}",
diff --git a/src/mito/src/engine/procedure/alter.rs b/src/mito/src/engine/procedure/alter.rs
index fadd14e49b96..84844721ffd7 100644
--- a/src/mito/src/engine/procedure/alter.rs
+++ b/src/mito/src/engine/procedure/alter.rs
@@ -55,7 +55,7 @@ impl<S: StorageEngine> Procedure for AlterMitoTable<S> {
match self.data.state {
AlterTableState::Prepare => self.on_prepare(),
AlterTableState::EngineAlterTable => {
- self.engine_alter_table().await?;
+ let _ = self.engine_alter_table().await?;
Ok(Status::Done)
}
}
@@ -223,7 +223,8 @@ impl<S: StorageEngine> AlterMitoTable<S> {
// It is possible that we write the manifest multiple times and bump the manifest
// version, but it is still correct as we always write the new table info.
- self.table
+ let _ = self
+ .table
.manifest()
.update(TableMetaActionList::with_action(TableMetaAction::Change(
Box::new(TableChange {
diff --git a/src/mito/src/engine/procedure/create.rs b/src/mito/src/engine/procedure/create.rs
index 93c75ff606de..bafd0a763136 100644
--- a/src/mito/src/engine/procedure/create.rs
+++ b/src/mito/src/engine/procedure/create.rs
@@ -165,7 +165,7 @@ impl<S: StorageEngine> CreateMitoTable<S> {
);
let _lock = self.creator.engine_inner.table_mutex.lock(table_id).await;
- self.creator.create_table().await?;
+ let _ = self.creator.create_table().await?;
Ok(Status::Done)
}
@@ -275,7 +275,7 @@ impl<S: StorageEngine> TableCreator<S> {
.map_err(Error::from_error_ext)?
{
// Region already exists.
- self.regions.insert(*number, region);
+ let _ = self.regions.insert(*number, region);
continue;
}
@@ -308,7 +308,7 @@ impl<S: StorageEngine> TableCreator<S> {
region_id
);
- self.regions.insert(*number, region);
+ let _ = self.regions.insert(*number, region);
}
Ok(())
@@ -324,7 +324,8 @@ impl<S: StorageEngine> TableCreator<S> {
{
let table = Arc::new(MitoTable::new(table_info, self.regions.clone(), manifest));
- self.engine_inner
+ let _ = self
+ .engine_inner
.tables
.insert(self.data.request.id, table.clone());
return Ok(table);
@@ -334,7 +335,8 @@ impl<S: StorageEngine> TableCreator<S> {
let table = self.write_manifest_and_create_table(table_dir).await?;
let table = Arc::new(table);
- self.engine_inner
+ let _ = self
+ .engine_inner
.tables
.insert(self.data.request.id, table.clone());
diff --git a/src/mito/src/engine/procedure/drop.rs b/src/mito/src/engine/procedure/drop.rs
index c447db10e536..13cdcf1aa4f3 100644
--- a/src/mito/src/engine/procedure/drop.rs
+++ b/src/mito/src/engine/procedure/drop.rs
@@ -44,7 +44,8 @@ impl<S: StorageEngine> Procedure for DropMitoTable<S> {
match self.data.state {
DropTableState::Prepare => self.on_prepare(),
DropTableState::EngineDropTable => {
- self.engine_inner
+ let _ = self
+ .engine_inner
.drop_table(self.data.request.clone())
.await
.map_err(Error::from_error_ext)?;
diff --git a/src/mito/src/engine/tests.rs b/src/mito/src/engine/tests.rs
index 816ccc68dcea..aa015bbff3d2 100644
--- a/src/mito/src/engine/tests.rs
+++ b/src/mito/src/engine/tests.rs
@@ -115,12 +115,12 @@ async fn setup_table_with_column_default_constraint() -> (TempDir, String, Table
async fn test_column_default_constraint() {
let (_dir, table_name, table) = setup_table_with_column_default_constraint().await;
- let mut columns_values: HashMap<String, VectorRef> = HashMap::with_capacity(4);
let names: VectorRef = Arc::new(StringVector::from(vec!["first", "second"]));
let tss: VectorRef = Arc::new(TimestampMillisecondVector::from_vec(vec![1, 2]));
-
- columns_values.insert("name".to_string(), names.clone());
- columns_values.insert("ts".to_string(), tss.clone());
+ let columns_values = HashMap::from([
+ ("name".to_string(), names.clone()),
+ ("ts".to_string(), tss.clone()),
+ ]);
let insert_req = new_insert_request(table_name.to_string(), columns_values);
assert_eq!(2, table.insert(insert_req).await.unwrap());
@@ -145,14 +145,14 @@ async fn test_column_default_constraint() {
async fn test_insert_with_column_default_constraint() {
let (_dir, table_name, table) = setup_table_with_column_default_constraint().await;
- let mut columns_values: HashMap<String, VectorRef> = HashMap::with_capacity(4);
let names: VectorRef = Arc::new(StringVector::from(vec!["first", "second"]));
let nums: VectorRef = Arc::new(Int32Vector::from(vec![None, Some(66)]));
let tss: VectorRef = Arc::new(TimestampMillisecondVector::from_vec(vec![1, 2]));
-
- columns_values.insert("name".to_string(), names.clone());
- columns_values.insert("n".to_string(), nums.clone());
- columns_values.insert("ts".to_string(), tss.clone());
+ let columns_values = HashMap::from([
+ ("name".to_string(), names.clone()),
+ ("n".to_string(), nums.clone()),
+ ("ts".to_string(), tss.clone()),
+ ]);
let insert_req = new_insert_request(table_name.to_string(), columns_values);
assert_eq!(2, table.insert(insert_req).await.unwrap());
@@ -243,16 +243,16 @@ async fn test_create_table_insert_scan() {
let insert_req = new_insert_request("demo".to_string(), HashMap::default());
assert_eq!(0, table.insert(insert_req).await.unwrap());
- let mut columns_values: HashMap<String, VectorRef> = HashMap::with_capacity(4);
let hosts: VectorRef = Arc::new(StringVector::from(vec!["host1", "host2"]));
let cpus: VectorRef = Arc::new(Float64Vector::from_vec(vec![55.5, 66.6]));
let memories: VectorRef = Arc::new(Float64Vector::from_vec(vec![1024f64, 4096f64]));
let tss: VectorRef = Arc::new(TimestampMillisecondVector::from_vec(vec![1, 2]));
-
- columns_values.insert("host".to_string(), hosts.clone());
- columns_values.insert("cpu".to_string(), cpus.clone());
- columns_values.insert("memory".to_string(), memories.clone());
- columns_values.insert("ts".to_string(), tss.clone());
+ let columns_values = HashMap::from([
+ ("host".to_string(), hosts.clone()),
+ ("cpu".to_string(), cpus.clone()),
+ ("memory".to_string(), memories.clone()),
+ ("ts".to_string(), tss.clone()),
+ ]);
let insert_req = new_insert_request("demo".to_string(), columns_values);
assert_eq!(2, table.insert(insert_req).await.unwrap());
@@ -327,7 +327,6 @@ async fn test_create_table_scan_batches() {
let default_batch_size = ReadContext::default().batch_size;
// Insert more than batch size rows to the table.
let test_batch_size = default_batch_size * 4;
- let mut columns_values: HashMap<String, VectorRef> = HashMap::with_capacity(4);
let hosts: VectorRef = Arc::new(StringVector::from(vec!["host1"; test_batch_size]));
let cpus: VectorRef = Arc::new(Float64Vector::from_vec(vec![55.5; test_batch_size]));
let memories: VectorRef = Arc::new(Float64Vector::from_vec(vec![1024f64; test_batch_size]));
@@ -335,10 +334,12 @@ async fn test_create_table_scan_batches() {
(0..test_batch_size).map(|v| v as i64),
));
- columns_values.insert("host".to_string(), hosts);
- columns_values.insert("cpu".to_string(), cpus);
- columns_values.insert("memory".to_string(), memories);
- columns_values.insert("ts".to_string(), tss.clone());
+ let columns_values = HashMap::from([
+ ("host".to_string(), hosts),
+ ("cpu".to_string(), cpus),
+ ("memory".to_string(), memories),
+ ("ts".to_string(), tss.clone()),
+ ]);
let insert_req = new_insert_request("demo".to_string(), columns_values);
assert_eq!(test_batch_size, table.insert(insert_req).await.unwrap());
@@ -822,7 +823,7 @@ async fn test_drop_table() {
region_numbers: vec![0],
engine: MITO_ENGINE.to_string(),
};
- table_engine.create_table(&ctx, request).await.unwrap();
+ assert!(table_engine.create_table(&ctx, request).await.is_ok());
assert!(table_engine.table_exists(&engine_ctx, table_id));
}
@@ -834,27 +835,26 @@ async fn test_table_delete_rows() {
..
} = test_util::setup_test_engine_and_table().await;
- let mut columns_values: HashMap<String, VectorRef> = HashMap::with_capacity(4);
let hosts: VectorRef = Arc::new(StringVector::from(vec!["host1", "host2", "host3", "host4"]));
let cpus: VectorRef = Arc::new(Float64Vector::from_vec(vec![1.0, 2.0, 3.0, 4.0]));
let memories: VectorRef = Arc::new(Float64Vector::from_vec(vec![1.0, 2.0, 3.0, 4.0]));
let tss: VectorRef = Arc::new(TimestampMillisecondVector::from_vec(vec![1, 2, 2, 1]));
-
- columns_values.insert("host".to_string(), hosts.clone());
- columns_values.insert("cpu".to_string(), cpus.clone());
- columns_values.insert("memory".to_string(), memories.clone());
- columns_values.insert("ts".to_string(), tss.clone());
+ let columns_values = HashMap::from([
+ ("host".to_string(), hosts.clone()),
+ ("cpu".to_string(), cpus.clone()),
+ ("memory".to_string(), memories.clone()),
+ ("ts".to_string(), tss.clone()),
+ ]);
let insert_req = new_insert_request("demo".to_string(), columns_values);
assert_eq!(4, table.insert(insert_req).await.unwrap());
let del_hosts: VectorRef = Arc::new(StringVector::from(vec!["host1", "host3"]));
let del_tss: VectorRef = Arc::new(TimestampMillisecondVector::from_vec(vec![1, 2]));
- let mut key_column_values = HashMap::with_capacity(2);
- key_column_values.insert("host".to_string(), del_hosts);
- key_column_values.insert("ts".to_string(), del_tss);
+ let key_column_values =
+ HashMap::from([("host".to_string(), del_hosts), ("ts".to_string(), del_tss)]);
let del_req = DeleteRequest { key_column_values };
- table.delete(del_req).await.unwrap();
+ assert!(table.delete(del_req).await.is_ok());
let session_ctx = SessionContext::new();
let stream = table.scan(None, &[], None).await.unwrap();
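The test hunks above replace a mutable map plus repeated `insert` calls with a single `HashMap::from` expression. A minimal sketch of that construction, using placeholder `Vec<i64>` values instead of the crate's `VectorRef` type, is:

use std::collections::HashMap;

fn main() {
    // Build the column map in one expression from key/value pairs,
    // so no insert results are produced and then ignored.
    let columns_values: HashMap<String, Vec<i64>> = HashMap::from([
        ("host".to_string(), vec![1, 2]),
        ("ts".to_string(), vec![10, 20]),
    ]);
    assert_eq!(columns_values.len(), 2);
}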
diff --git a/src/mito/src/manifest/action.rs b/src/mito/src/manifest/action.rs
index 0767b7a9524b..4e6a4b7f1dab 100644
--- a/src/mito/src/manifest/action.rs
+++ b/src/mito/src/manifest/action.rs
@@ -183,13 +183,13 @@ mod tests {
#[test]
fn test_table_manifest_compatibility() {
let table_change = r#"{"table_info":{"ident":{"table_id":0,"version":0},"name":"demo","desc":null,"catalog_name":"greptime","schema_name":"public","meta":{"schema":{"column_schemas":[{"name":"host","data_type":{"String":null},"is_nullable":false,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"cpu","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"memory","data_type":{"Float64":{}},"is_nullable":false,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":true,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}}],"timestamp_index":3,"version":0},"primary_key_indices":[0],"value_indices":[1,2,3],"engine":"mito","next_column_id":1,"region_numbers":[],"engine_options":{},"options":{"write_buffer_size":null,"ttl":null,"extra_options":{}},"created_on":"2023-03-06T08:50:34.662020Z"},"table_type":"Base"}}"#;
- serde_json::from_str::<TableChange>(table_change).unwrap();
+ assert!(serde_json::from_str::<TableChange>(table_change).is_ok());
let table_remove =
r#"{"table_ident":{"table_id":42,"version":0},"table_name":"test_table"}"#;
- serde_json::from_str::<TableRemove>(table_remove).unwrap();
+ assert!(serde_json::from_str::<TableRemove>(table_remove).is_ok());
let protocol_action = r#"{"min_reader_version":0,"min_writer_version":1}"#;
- serde_json::from_str::<ProtocolAction>(protocol_action).unwrap();
+ assert!(serde_json::from_str::<ProtocolAction>(protocol_action).is_ok());
}
}
diff --git a/src/mito/src/table.rs b/src/mito/src/table.rs
index 4b758785c122..7f37b6d50732 100644
--- a/src/mito/src/table.rs
+++ b/src/mito/src/table.rs
@@ -328,7 +328,8 @@ impl<R: Region> Table for MitoTable<R> {
table_name,
new_info
);
- self.manifest
+ let _ = self
+ .manifest
.update(TableMetaActionList::with_action(TableMetaAction::Change(
Box::new(TableChange {
table_info: RawTableInfo::from(new_info.clone()),
@@ -371,7 +372,7 @@ impl<R: Region> Table for MitoTable<R> {
.delete(key_column_values)
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)?;
- region
+ let _ = region
.write(&WriteContext::default(), write_request)
.await
.map_err(BoxedError::new)
@@ -404,10 +405,12 @@ impl<R: Region> Table for MitoTable<R> {
.context(table_error::TableOperationSnafu)?;
}
} else {
- futures::future::try_join_all(regions.values().map(|region| region.flush(&flush_ctx)))
- .await
- .map_err(BoxedError::new)
- .context(table_error::TableOperationSnafu)?;
+ let _ = futures::future::try_join_all(
+ regions.values().map(|region| region.flush(&flush_ctx)),
+ )
+ .await
+ .map_err(BoxedError::new)
+ .context(TableOperationSnafu)?;
}
Ok(())
@@ -600,12 +603,12 @@ impl<R: Region> MitoTable<R> {
region_numbers: &[RegionNumber],
) -> TableResult<HashMap<RegionNumber, R>> {
let mut removed = HashMap::with_capacity(region_numbers.len());
- self.regions.rcu(|regions| {
+ let _ = self.regions.rcu(|regions| {
removed.clear();
let mut regions = HashMap::clone(regions);
for region_number in region_numbers {
if let Some(region) = regions.remove(region_number) {
- removed.insert(*region_number, region);
+ let _ = removed.insert(*region_number, region);
}
}
@@ -618,7 +621,7 @@ impl<R: Region> MitoTable<R> {
pub async fn drop_regions(&self, region_number: &[RegionNumber]) -> TableResult<()> {
let regions = self.remove_regions(region_number).await?;
- futures::future::try_join_all(regions.values().map(|region| region.drop_region()))
+ let _ = futures::future::try_join_all(regions.values().map(|region| region.drop_region()))
.await
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)?;
@@ -638,7 +641,7 @@ impl<R: Region> MitoTable<R> {
}
pub fn set_table_info(&self, table_info: TableInfo) {
- self.table_info.swap(Arc::new(table_info));
+ let _ = self.table_info.swap(Arc::new(table_info));
}
#[inline]
@@ -692,9 +695,9 @@ impl<R: Region> MitoTable<R> {
pub async fn load_region(&self, region_number: RegionNumber, region: R) -> TableResult<()> {
let info = self.table_info.load();
- self.regions.rcu(|regions| {
+ let _ = self.regions.rcu(|regions| {
let mut regions = HashMap::clone(regions);
- regions
+ let _ = regions
.entry(region_number)
.or_insert_with(|| region.clone());
diff --git a/src/mito/src/table/test_util.rs b/src/mito/src/table/test_util.rs
index b23cd8d4f5ae..c937958e7dce 100644
--- a/src/mito/src/table/test_util.rs
+++ b/src/mito/src/table/test_util.rs
@@ -102,7 +102,7 @@ pub async fn new_test_object_store(prefix: &str) -> (TempDir, ObjectStore) {
let dir = create_temp_dir(prefix);
let store_dir = dir.path().to_string_lossy();
let mut builder = Builder::default();
- builder.root(&store_dir);
+ let _ = builder.root(&store_dir);
(dir, ObjectStore::new(builder).unwrap().finish())
}
@@ -205,16 +205,17 @@ pub async fn setup_mock_engine_and_table(
}
pub async fn setup_table(table: Arc<dyn Table>) {
- let mut columns_values: HashMap<String, VectorRef> = HashMap::with_capacity(4);
let hosts: VectorRef = Arc::new(StringVector::from(vec!["host1", "host2", "host3", "host4"]));
let cpus: VectorRef = Arc::new(Float64Vector::from_vec(vec![1.0, 2.0, 3.0, 4.0]));
let memories: VectorRef = Arc::new(Float64Vector::from_vec(vec![1.0, 2.0, 3.0, 4.0]));
let tss: VectorRef = Arc::new(TimestampMillisecondVector::from_vec(vec![1, 2, 2, 1]));
- columns_values.insert("host".to_string(), hosts.clone());
- columns_values.insert("cpu".to_string(), cpus.clone());
- columns_values.insert("memory".to_string(), memories.clone());
- columns_values.insert("ts".to_string(), tss.clone());
+ let columns_values = HashMap::from([
+ ("host".to_string(), hosts),
+ ("cpu".to_string(), cpus),
+ ("memory".to_string(), memories),
+ ("ts".to_string(), tss),
+ ]);
let insert_req = new_insert_request("demo".to_string(), columns_values);
assert_eq!(4, table.insert(insert_req).await.unwrap());
diff --git a/src/mito/src/table/test_util/mock_engine.rs b/src/mito/src/table/test_util/mock_engine.rs
index 962faace4095..018c9e6816ea 100644
--- a/src/mito/src/table/test_util/mock_engine.rs
+++ b/src/mito/src/table/test_util/mock_engine.rs
@@ -208,10 +208,12 @@ impl Region for MockRegion {
impl MockRegionInner {
fn new(metadata: RegionMetadata) -> Self {
- let mut memtable = HashMap::new();
- for column in metadata.user_schema().column_schemas() {
- memtable.insert(column.name.clone(), vec![]);
- }
+ let memtable = metadata
+ .user_schema()
+ .column_schemas()
+ .iter()
+ .map(|x| (x.name.clone(), vec![]))
+ .collect();
Self {
name: metadata.name().to_string(),
metadata: ArcSwap::new(Arc::new(metadata)),
@@ -226,12 +228,12 @@ impl MockRegionInner {
// Now drop columns is not supported.
let rows = memtable.values().last().unwrap().len();
for column in metadata.user_schema().column_schemas() {
- memtable
+ let _ = memtable
.entry(column.name.clone())
.or_insert_with(|| vec![Value::Null; rows]);
}
}
- self.metadata.swap(Arc::new(metadata));
+ let _ = self.metadata.swap(Arc::new(metadata));
}
fn write(&self, request: WriteBatch) {
@@ -282,7 +284,7 @@ impl StorageEngine for MockEngine {
}
if let Some(region) = regions.closed_regions.remove(name) {
- regions
+ let _ = regions
.opened_regions
.insert(name.to_string(), region.clone());
return Ok(Some(region));
@@ -300,7 +302,7 @@ impl StorageEngine for MockEngine {
let mut regions = self.regions.lock().unwrap();
if let Some(region) = regions.opened_regions.remove(name) {
- regions.closed_regions.insert(name.to_string(), region);
+ let _ = regions.closed_regions.insert(name.to_string(), region);
}
Ok(())
@@ -324,7 +326,7 @@ impl StorageEngine for MockEngine {
let region = MockRegion {
inner: Arc::new(MockRegionInner::new(metadata)),
};
- regions.opened_regions.insert(name, region.clone());
+ let _ = regions.opened_regions.insert(name, region.clone());
Ok(region)
}
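The mock-region hunk above builds its in-memory column map by collecting an iterator instead of inserting into a mutable `HashMap` in a loop. A small sketch of the same pattern, with plain string column names standing in for the real schema types, is:

use std::collections::HashMap;

fn main() {
    let column_names = ["host", "cpu", "ts"];
    // Map each column name to an empty value buffer and collect the
    // pairs directly into a HashMap.
    let memtable: HashMap<String, Vec<i32>> = column_names
        .iter()
        .map(|name| (name.to_string(), Vec::new()))
        .collect();
    assert_eq!(memtable.len(), 3);
}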
diff --git a/src/object-store/src/layers/lru_cache.rs b/src/object-store/src/layers/lru_cache.rs
index 5a3b2bf89b34..34ba6557e7ea 100644
--- a/src/object-store/src/layers/lru_cache.rs
+++ b/src/object-store/src/layers/lru_cache.rs
@@ -58,7 +58,7 @@ impl<C: Accessor + Clone> LruCacheLayer<C> {
let mut lru_cache = self.lru_cache.lock().await;
while let Some(entries) = pager.next().await? {
for entry in entries {
- lru_cache.push(entry.path().to_string(), ());
+ let _ = lru_cache.push(entry.path().to_string(), ());
}
}
@@ -128,7 +128,7 @@ impl<I: Accessor, C: Accessor> LayeredAccessor for LruCacheAccessor<I, C> {
// update lru when cache hit
let mut lru_cache = lru_cache.lock().await;
- lru_cache.get_or_insert(cache_path.clone(), || ());
+ let _ = lru_cache.get_or_insert(cache_path.clone(), || ());
Ok(to_output_reader((rp, r)))
}
Err(err) if err.kind() == ErrorKind::NotFound => {
@@ -187,7 +187,7 @@ impl<I: Accessor, C: Accessor> LayeredAccessor for LruCacheAccessor<I, C> {
.map(|(k, _v)| k.clone())
.collect::<Vec<_>>();
for k in &cache_files {
- lru.pop(k);
+ let _ = lru.pop(k);
}
cache_files
};
diff --git a/src/object-store/tests/object_store_test.rs b/src/object-store/tests/object_store_test.rs
index 3c06d7fe826d..7fad4229fe0b 100644
--- a/src/object-store/tests/object_store_test.rs
+++ b/src/object-store/tests/object_store_test.rs
@@ -47,7 +47,7 @@ async fn test_object_crud(store: &ObjectStore) -> Result<()> {
// Delete object.
store.delete(file_name).await.unwrap();
- store.read(file_name).await.unwrap_err();
+ assert!(store.read(file_name).await.is_err());
Ok(())
}
@@ -89,7 +89,7 @@ async fn test_fs_backend() -> Result<()> {
let data_dir = create_temp_dir("test_fs_backend");
let tmp_dir = create_temp_dir("test_fs_backend");
let mut builder = Fs::default();
- builder
+ let _ = builder
.root(&data_dir.path().to_string_lossy())
.atomic_write_dir(&tmp_dir.path().to_string_lossy());
@@ -111,7 +111,7 @@ async fn test_s3_backend() -> Result<()> {
let root = uuid::Uuid::new_v4().to_string();
let mut builder = S3::default();
- builder
+ let _ = builder
.root(&root)
.access_key_id(&env::var("GT_S3_ACCESS_KEY_ID")?)
.secret_access_key(&env::var("GT_S3_ACCESS_KEY")?)
@@ -140,7 +140,7 @@ async fn test_oss_backend() -> Result<()> {
let root = uuid::Uuid::new_v4().to_string();
let mut builder = Oss::default();
- builder
+ let _ = builder
.root(&root)
.access_key_id(&env::var("GT_OSS_ACCESS_KEY_ID")?)
.access_key_secret(&env::var("GT_OSS_ACCESS_KEY")?)
@@ -168,7 +168,7 @@ async fn test_azblob_backend() -> Result<()> {
let root = uuid::Uuid::new_v4().to_string();
let mut builder = Azblob::default();
- builder
+ let _ = builder
.root(&root)
.account_name(&env::var("GT_AZBLOB_ACCOUNT_NAME")?)
.account_key(&env::var("GT_AZBLOB_ACCOUNT_KEY")?)
@@ -238,7 +238,7 @@ async fn test_object_store_cache_policy() -> Result<()> {
// create file cache layer
let cache_dir = create_temp_dir("test_object_store_cache_policy_cache");
let mut builder = Fs::default();
- builder
+ let _ = builder
.root(&cache_dir.path().to_string_lossy())
.atomic_write_dir(&cache_dir.path().to_string_lossy());
let cache_accessor = Arc::new(builder.build().unwrap());
@@ -258,11 +258,11 @@ async fn test_object_store_cache_policy() -> Result<()> {
store.write(p2, "Hello, object2!").await.unwrap();
// create cache by read object
- store.range_read(p1, 0..).await?;
- store.read(p1).await?;
- store.range_read(p2, 0..).await?;
- store.range_read(p2, 7..).await?;
- store.read(p2).await?;
+ let _ = store.range_read(p1, 0..).await?;
+ let _ = store.read(p1).await?;
+ let _ = store.range_read(p2, 0..).await?;
+ let _ = store.range_read(p2, 7..).await?;
+ let _ = store.read(p2).await?;
assert_cache_files(
&cache_store,
@@ -300,8 +300,8 @@ async fn test_object_store_cache_policy() -> Result<()> {
let p3 = "test_file3";
store.write(p3, "Hello, object3!").await.unwrap();
- store.read(p3).await.unwrap();
- store.range_read(p3, 0..5).await.unwrap();
+ assert!(store.read(p3).await.is_ok());
+ assert!(store.range_read(p3, 0..5).await.is_ok());
assert_cache_files(
&cache_store,
diff --git a/src/partition/src/splitter.rs b/src/partition/src/splitter.rs
index 3634d57546db..101a5ac80412 100644
--- a/src/partition/src/splitter.rs
+++ b/src/partition/src/splitter.rs
@@ -68,7 +68,7 @@ impl WriteSplitter {
.context(MissingDefaultValueSnafu {
column: &column_schema.name,
})?;
- insert
+ let _ = insert
.columns_values
.insert(column_schema.name.clone(), default_values);
}
@@ -396,10 +396,7 @@ mod tests {
#[test]
fn test_partition_insert_request() {
let insert = mock_insert_request();
- let mut region_map: HashMap<RegionNumber, Vec<usize>> = HashMap::with_capacity(2);
- region_map.insert(1, vec![2, 0]);
- region_map.insert(2, vec![1]);
-
+ let region_map = HashMap::from([(1, vec![2, 0]), (2, vec![1])]);
let dist_insert = split_insert_request(&insert, region_map);
let r1_insert = dist_insert.get(&1_u32).unwrap();
@@ -511,19 +508,19 @@ mod tests {
builder.push(Some(true));
builder.push(Some(false));
builder.push(Some(true));
- columns_values.insert("enable_reboot".to_string(), builder.to_vector());
+ let _ = columns_values.insert("enable_reboot".to_string(), builder.to_vector());
let mut builder = StringVectorBuilder::with_capacity(3);
builder.push(Some("host1"));
builder.push(None);
builder.push(Some("host3"));
- columns_values.insert("host".to_string(), builder.to_vector());
+ let _ = columns_values.insert("host".to_string(), builder.to_vector());
let mut builder = Int16VectorBuilder::with_capacity(3);
builder.push(Some(1_i16));
builder.push(Some(2_i16));
builder.push(Some(3_i16));
- columns_values.insert("id".to_string(), builder.to_vector());
+ let _ = columns_values.insert("id".to_string(), builder.to_vector());
InsertRequest {
catalog_name: common_catalog::consts::DEFAULT_CATALOG_NAME.to_string(),
@@ -540,13 +537,13 @@ mod tests {
builder.push(Some(true));
builder.push(Some(false));
builder.push(Some(true));
- columns_values.insert("enable_reboot".to_string(), builder.to_vector());
+ let _ = columns_values.insert("enable_reboot".to_string(), builder.to_vector());
let mut builder = StringVectorBuilder::with_capacity(3);
builder.push(Some("host1"));
builder.push(None);
builder.push(Some("host3"));
- columns_values.insert("host".to_string(), builder.to_vector());
+ let _ = columns_values.insert("host".to_string(), builder.to_vector());
let insert_request = InsertRequest {
catalog_name: common_catalog::consts::DEFAULT_CATALOG_NAME.to_string(),
@@ -579,18 +576,18 @@ mod tests {
builder.push(Some(true));
builder.push(Some(false));
builder.push(Some(true));
- columns_values.insert("enable_reboot".to_string(), builder.to_vector());
+ let _ = columns_values.insert("enable_reboot".to_string(), builder.to_vector());
let mut builder = StringVectorBuilder::with_capacity(3);
builder.push(Some("host1"));
builder.push(None);
builder.push(Some("host3"));
- columns_values.insert("host".to_string(), builder.to_vector());
+ let _ = columns_values.insert("host".to_string(), builder.to_vector());
let mut builder = Int16VectorBuilder::with_capacity(1);
builder.push(Some(1_i16));
// two values are missing
- columns_values.insert("id".to_string(), builder.to_vector());
+ let _ = columns_values.insert("id".to_string(), builder.to_vector());
InsertRequest {
catalog_name: common_catalog::consts::DEFAULT_CATALOG_NAME.to_string(),
diff --git a/src/promql/src/extension_plan/range_manipulate.rs b/src/promql/src/extension_plan/range_manipulate.rs
index de56269e5e17..f8df42ea6284 100644
--- a/src/promql/src/extension_plan/range_manipulate.rs
+++ b/src/promql/src/extension_plan/range_manipulate.rs
@@ -415,7 +415,7 @@ impl RangeManipulateStream {
// transform columns
let mut new_columns = input.columns().to_vec();
for index in self.field_columns.iter() {
- other_columns.remove(index);
+ let _ = other_columns.remove(index);
let column = input.column(*index);
let new_column = Arc::new(
RangeArray::from_ranges(column.clone(), ranges.clone())
diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs
index 51091e89f935..9abe9d05cb3c 100644
--- a/src/promql/src/planner.rs
+++ b/src/promql/src/planner.rs
@@ -487,7 +487,7 @@ impl PromPlanner {
.get_or_insert_default()
.push(matcher.clone());
} else {
- matchers.insert(matcher.clone());
+ let _ = matchers.insert(matcher.clone());
}
}
Ok(Matchers { matchers })
@@ -538,7 +538,7 @@ impl PromPlanner {
match &matcher.op {
MatchOp::Equal => {
if col_set.contains(&matcher.value) {
- result_set.insert(matcher.value.clone());
+ let _ = result_set.insert(matcher.value.clone());
} else {
return Err(ColumnNotFoundSnafu {
col: matcher.value.clone(),
@@ -548,7 +548,7 @@ impl PromPlanner {
}
MatchOp::NotEqual => {
if col_set.contains(&matcher.value) {
- reverse_set.insert(matcher.value.clone());
+ let _ = reverse_set.insert(matcher.value.clone());
} else {
return Err(ColumnNotFoundSnafu {
col: matcher.value.clone(),
@@ -559,14 +559,14 @@ impl PromPlanner {
MatchOp::Re(regex) => {
for col in &self.ctx.field_columns {
if regex.is_match(col) {
- result_set.insert(col.clone());
+ let _ = result_set.insert(col.clone());
}
}
}
MatchOp::NotRe(regex) => {
for col in &self.ctx.field_columns {
if regex.is_match(col) {
- reverse_set.insert(col.clone());
+ let _ = reverse_set.insert(col.clone());
}
}
}
@@ -577,7 +577,7 @@ impl PromPlanner {
result_set = col_set.into_iter().cloned().collect();
}
for col in reverse_set {
- result_set.remove(&col);
+ let _ = result_set.remove(&col);
}
self.ctx.field_columns = result_set.iter().cloned().collect();
@@ -670,15 +670,15 @@ impl PromPlanner {
// remove "without"-ed fields
// nonexistence label will be ignored
for label in labels {
- all_fields.remove(label);
+ let _ = all_fields.remove(label);
}
// remove time index and value fields
if let Some(time_index) = &self.ctx.time_index_column {
- all_fields.remove(time_index);
+ let _ = all_fields.remove(time_index);
}
for value in &self.ctx.field_columns {
- all_fields.remove(value);
+ let _ = all_fields.remove(value);
}
// change the tag columns in context
@@ -927,7 +927,7 @@ impl PromPlanner {
args: other_input_exprs.clone(),
});
exprs.push(fn_expr);
- other_input_exprs.remove(field_column_pos);
+ let _ = other_input_exprs.remove(field_column_pos);
}
ScalarFunc::Udf(fun) => {
let ts_range_expr = DfExpr::Column(Column::from_name(
@@ -942,8 +942,8 @@ impl PromPlanner {
args: other_input_exprs.clone(),
});
exprs.push(fn_expr);
- other_input_exprs.remove(field_column_pos + 1);
- other_input_exprs.remove(field_column_pos);
+ let _ = other_input_exprs.remove(field_column_pos + 1);
+ let _ = other_input_exprs.remove(field_column_pos);
}
ScalarFunc::ExtrapolateUdf(fun) => {
let ts_range_expr = DfExpr::Column(Column::from_name(
@@ -960,9 +960,9 @@ impl PromPlanner {
args: other_input_exprs.clone(),
});
exprs.push(fn_expr);
- other_input_exprs.remove(field_column_pos + 2);
- other_input_exprs.remove(field_column_pos + 1);
- other_input_exprs.remove(field_column_pos);
+ let _ = other_input_exprs.remove(field_column_pos + 2);
+ let _ = other_input_exprs.remove(field_column_pos + 1);
+ let _ = other_input_exprs.remove(field_column_pos);
}
}
}
@@ -1338,7 +1338,7 @@ mod test {
.unwrap();
let table = Arc::new(EmptyTable::from_table_info(&table_info));
let catalog_list = Arc::new(MemoryCatalogManager::default());
- catalog_list
+ assert!(catalog_list
.register_table(RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
@@ -1347,7 +1347,7 @@ mod test {
table,
})
.await
- .unwrap();
+ .is_ok());
DfTableSourceProvider::new(catalog_list, false, &QueryContext::new())
}
diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs
index 4dd7c1f21bef..26002dff6371 100644
--- a/src/query/src/datafusion.rs
+++ b/src/query/src/datafusion.rs
@@ -415,7 +415,7 @@ mod tests {
table_id: NUMBERS_TABLE_ID,
table: Arc::new(NumbersTable::default()),
};
- catalog_manager.register_table(req).await.unwrap();
+ assert!(catalog_manager.register_table(req).await.is_ok());
QueryEngineFactory::new(catalog_manager, false).query_engine()
}
diff --git a/src/query/src/datafusion/planner.rs b/src/query/src/datafusion/planner.rs
index abf02796b418..7b6d4d627df8 100644
--- a/src/query/src/datafusion/planner.rs
+++ b/src/query/src/datafusion/planner.rs
@@ -87,7 +87,7 @@ async fn resolve_tables(
// because the table name may be a temporary name of CTE or view, they can't be found until plan
// execution.
if let Ok(table) = table_provider.resolve_table(table_name).await {
- v.insert(table);
+ let _ = v.insert(table);
}
}
}
diff --git a/src/query/src/dist_plan/analyzer.rs b/src/query/src/dist_plan/analyzer.rs
index 19c69d5ad907..7e274fc15582 100644
--- a/src/query/src/dist_plan/analyzer.rs
+++ b/src/query/src/dist_plan/analyzer.rs
@@ -42,7 +42,7 @@ impl AnalyzerRule for DistPlannerAnalyzer {
// (2) transform up merge scan
let mut visitor = CommutativeVisitor::new();
- plan.visit(&mut visitor)?;
+ let _ = plan.visit(&mut visitor)?;
let state = ExpandState::new();
let plan = plan.transform_down(&|plan| Self::expand(plan, &visitor, &state))?;
diff --git a/src/query/src/dist_plan/planner.rs b/src/query/src/dist_plan/planner.rs
index faa384cd56e5..9b4ac6376a37 100644
--- a/src/query/src/dist_plan/planner.rs
+++ b/src/query/src/dist_plan/planner.rs
@@ -115,7 +115,7 @@ impl DistExtensionPlanner {
/// Extract table name from logical plan
fn get_table_name(&self, plan: &LogicalPlan) -> Result<Option<TableName>> {
let mut extractor = TableNameExtractor::default();
- plan.visit(&mut extractor)?;
+ let _ = plan.visit(&mut extractor)?;
Ok(extractor.table_name)
}
diff --git a/src/query/src/optimizer/order_hint.rs b/src/query/src/optimizer/order_hint.rs
index 6fd534e796f4..c2c0d41197ba 100644
--- a/src/query/src/optimizer/order_hint.rs
+++ b/src/query/src/optimizer/order_hint.rs
@@ -43,7 +43,7 @@ impl OptimizerRule for OrderHintRule {
impl OrderHintRule {
fn optimize(plan: &LogicalPlan) -> DataFusionResult<LogicalPlan> {
let mut visitor = OrderHintVisitor::default();
- plan.visit(&mut visitor)?;
+ let _ = plan.visit(&mut visitor)?;
if let Some(order_expr) = visitor.order_expr.take() {
plan.clone()
@@ -149,7 +149,7 @@ mod test {
.unwrap();
let context = OptimizerContext::default();
- OrderHintRule.try_optimize(&plan, &context).unwrap();
+ assert!(OrderHintRule.try_optimize(&plan, &context).is_ok());
// should read the first (with `.sort(true, false)`) sort option
let scan_req = adapter.get_scan_req();
diff --git a/src/query/src/query_engine/state.rs b/src/query/src/query_engine/state.rs
index a099df424039..ca123fe1ccb0 100644
--- a/src/query/src/query_engine/state.rs
+++ b/src/query/src/query_engine/state.rs
@@ -139,7 +139,7 @@ impl QueryEngineState {
}
}
if let Some(index) = index_to_move {
- rules.remove(index);
+ let _ = rules.remove(index);
}
}
diff --git a/src/query/src/sql/show.rs b/src/query/src/sql/show.rs
index 8b28c77db1c9..7b4db61fcbf3 100644
--- a/src/query/src/sql/show.rs
+++ b/src/query/src/sql/show.rs
@@ -276,15 +276,15 @@ WITH(
let schema_name = "public".to_string();
let catalog_name = "greptime".to_string();
let mut options: TableOptions = Default::default();
- options.extra_options.insert(
+ let _ = options.extra_options.insert(
IMMUTABLE_TABLE_LOCATION_KEY.to_string(),
"foo.csv".to_string(),
);
- options.extra_options.insert(
+ let _ = options.extra_options.insert(
IMMUTABLE_TABLE_META_KEY.to_string(),
"{{\"files\":[\"foo.csv\"]}}".to_string(),
);
- options
+ let _ = options
.extra_options
.insert(IMMUTABLE_TABLE_FORMAT_KEY.to_string(), "csv".to_string());
let meta = TableMetaBuilder::default()
diff --git a/src/query/src/tests/query_engine_test.rs b/src/query/src/tests/query_engine_test.rs
index 673c439f1bc0..cb613967d8fc 100644
--- a/src/query/src/tests/query_engine_test.rs
+++ b/src/query/src/tests/query_engine_test.rs
@@ -112,7 +112,7 @@ fn catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
table_id: NUMBERS_TABLE_ID,
table: Arc::new(NumbersTable::default()),
};
- catalog_manager.register_table_sync(req).unwrap();
+ assert!(catalog_manager.register_table_sync(req).is_ok());
Ok(catalog_manager)
}
diff --git a/src/query/src/tests/time_range_filter_test.rs b/src/query/src/tests/time_range_filter_test.rs
index 73fe78d748f1..49d32bb712bb 100644
--- a/src/query/src/tests/time_range_filter_test.rs
+++ b/src/query/src/tests/time_range_filter_test.rs
@@ -124,7 +124,7 @@ fn create_test_engine() -> TimeRangeTester {
table_id: table.table_info().ident.table_id,
table: table.clone(),
};
- catalog_manager.register_table_sync(req).unwrap();
+ let _ = catalog_manager.register_table_sync(req).unwrap();
let engine = QueryEngineFactory::new(catalog_manager, false).query_engine();
TimeRangeTester { engine, table }
diff --git a/src/script/benches/py_benchmark.rs b/src/script/benches/py_benchmark.rs
index eb97e16f0cc7..a888db4f6fb2 100644
--- a/src/script/benches/py_benchmark.rs
+++ b/src/script/benches/py_benchmark.rs
@@ -110,7 +110,7 @@ def entry() -> vector[i64]:
);
let source = Arc::new(source);
// execute the script in parallel for every thread in the pool
- pool.broadcast(|_| {
+ let _ = pool.broadcast(|_| {
let source = source.clone();
let rt = get_local_runtime().unwrap();
rt.block_on(async move {
@@ -163,44 +163,44 @@ fn criterion_benchmark(c: &mut Criterion) {
// which require a local mock library
// TODO(discord9): revisit once mock library is ready
- c.bench_function("fib 20 rspy", |b| {
- b.to_async(tokio::runtime::Runtime::new().unwrap())
- .iter(|| fibonacci(black_box(20), "rspy"))
- });
- c.bench_function("fib 20 pyo3", |b| {
- b.to_async(tokio::runtime::Runtime::new().unwrap())
- .iter(|| fibonacci(black_box(20), "pyo3"))
- });
-
let pool = rayon::ThreadPoolBuilder::new()
.num_threads(16)
.build()
.unwrap();
- c.bench_function("par fib 20 rspy", |b| {
- b.to_async(tokio::runtime::Runtime::new().unwrap())
- .iter(|| parallel_fibonacci(black_box(20), "rspy", &pool))
- });
- c.bench_function("par fib 20 pyo3", |b| {
- b.to_async(tokio::runtime::Runtime::new().unwrap())
- .iter(|| parallel_fibonacci(black_box(20), "pyo3", &pool))
- });
- c.bench_function("loop 1M rspy", |b| {
- b.to_async(tokio::runtime::Runtime::new().unwrap())
- .iter(|| loop_1_million(black_box("rspy")))
- });
- c.bench_function("loop 1M pyo3", |b| {
- b.to_async(tokio::runtime::Runtime::new().unwrap())
- .iter(|| loop_1_million(black_box("pyo3")))
- });
- c.bench_function("api heavy rspy", |b| {
- b.to_async(tokio::runtime::Runtime::new().unwrap())
- .iter(|| api_heavy(black_box("rspy")))
- });
- c.bench_function("api heavy pyo3", |b| {
- b.to_async(tokio::runtime::Runtime::new().unwrap())
- .iter(|| api_heavy(black_box("pyo3")))
- });
+ let _ = c
+ .bench_function("fib 20 rspy", |b| {
+ b.to_async(tokio::runtime::Runtime::new().unwrap())
+ .iter(|| fibonacci(black_box(20), "rspy"))
+ })
+ .bench_function("fib 20 pyo3", |b| {
+ b.to_async(tokio::runtime::Runtime::new().unwrap())
+ .iter(|| fibonacci(black_box(20), "pyo3"))
+ })
+ .bench_function("par fib 20 rspy", |b| {
+ b.to_async(tokio::runtime::Runtime::new().unwrap())
+ .iter(|| parallel_fibonacci(black_box(20), "rspy", &pool))
+ })
+ .bench_function("par fib 20 pyo3", |b| {
+ b.to_async(tokio::runtime::Runtime::new().unwrap())
+ .iter(|| parallel_fibonacci(black_box(20), "pyo3", &pool))
+ })
+ .bench_function("loop 1M rspy", |b| {
+ b.to_async(tokio::runtime::Runtime::new().unwrap())
+ .iter(|| loop_1_million(black_box("rspy")))
+ })
+ .bench_function("loop 1M pyo3", |b| {
+ b.to_async(tokio::runtime::Runtime::new().unwrap())
+ .iter(|| loop_1_million(black_box("pyo3")))
+ })
+ .bench_function("api heavy rspy", |b| {
+ b.to_async(tokio::runtime::Runtime::new().unwrap())
+ .iter(|| api_heavy(black_box("rspy")))
+ })
+ .bench_function("api heavy pyo3", |b| {
+ b.to_async(tokio::runtime::Runtime::new().unwrap())
+ .iter(|| api_heavy(black_box("pyo3")))
+ });
}
criterion_group!(benches, criterion_benchmark);
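The chained registration above relies on `Criterion::bench_function` returning `&mut Criterion`, so the benchmarks can be strung together and the final reference discarded once with `let _ =`. A minimal stand-alone sketch of that pattern (assuming the `criterion` crate; the fibonacci helper is illustrative only):

```rust
use criterion::{black_box, criterion_group, criterion_main, Criterion};

fn fibonacci(n: u64) -> u64 {
    match n {
        0 | 1 => 1,
        _ => fibonacci(n - 1) + fibonacci(n - 2),
    }
}

fn criterion_benchmark(c: &mut Criterion) {
    // `bench_function` returns `&mut Criterion`, so registrations chain;
    // binding the final reference to `_` discards it in one place.
    let _ = c
        .bench_function("fib 10", |b| b.iter(|| fibonacci(black_box(10))))
        .bench_function("fib 15", |b| b.iter(|| fibonacci(black_box(15))));
}

criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
```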
diff --git a/src/script/src/manager.rs b/src/script/src/manager.rs
index 6af2006e8c4d..c5907ecfbbf7 100644
--- a/src/script/src/manager.rs
+++ b/src/script/src/manager.rs
@@ -51,7 +51,7 @@ impl ScriptManager {
{
let mut compiled = self.compiled.write().unwrap();
- compiled.insert(name.to_string(), script.clone());
+ let _ = compiled.insert(name.to_string(), script.clone());
}
logging::info!("Compiled and cached script: {}", name);
diff --git a/src/script/src/python/engine.rs b/src/script/src/python/engine.rs
index 5dec2c4f37da..1a34721d5481 100644
--- a/src/script/src/python/engine.rs
+++ b/src/script/src/python/engine.rs
@@ -419,9 +419,10 @@ def test(**params) -> vector[i64]:
.compile(script, CompileContext::default())
.await
.unwrap();
- let mut params = HashMap::new();
- params.insert("a".to_string(), "30".to_string());
- params.insert("b".to_string(), "12".to_string());
+ let params = HashMap::from([
+ ("a".to_string(), "30".to_string()),
+ ("b".to_string(), "12".to_string()),
+ ]);
let _output = script
.execute(params, EvalContext::default())
.await
diff --git a/src/script/src/python/ffi_types/copr/parse.rs b/src/script/src/python/ffi_types/copr/parse.rs
index 537e91f54d54..2c27b8bbd681 100644
--- a/src/script/src/python/ffi_types/copr/parse.rs
+++ b/src/script/src/python/ffi_types/copr/parse.rs
@@ -292,7 +292,7 @@ fn parse_keywords(keywords: &Vec<ast::Keyword<()>>) -> Result<DecoratorArgs> {
Some(kw.location),
);
} else {
- visited_key.insert(s);
+ let _ = visited_key.insert(s);
}
match s {
"args" => ret_args.arg_names = Some(pylist_to_vec(&kw.node.value)?),
diff --git a/src/script/src/python/ffi_types/pair_tests.rs b/src/script/src/python/ffi_types/pair_tests.rs
index 974624aae88e..fe3ea978f9be 100644
--- a/src/script/src/python/ffi_types/pair_tests.rs
+++ b/src/script/src/python/ffi_types/pair_tests.rs
@@ -96,7 +96,7 @@ async fn integrated_py_copr_test() {
let mut actual_result = HashMap::new();
for col_sch in rb.schema.column_schemas() {
let col = rb.column_by_name(&col_sch.name).unwrap();
- actual_result.insert(col_sch.name.clone(), col.clone());
+ let _ = actual_result.insert(col_sch.name.clone(), col.clone());
}
for (name, col) in expect_result {
let actual_col = actual_result.get(&name).unwrap_or_else(|| {
diff --git a/src/script/src/python/ffi_types/vector/tests.rs b/src/script/src/python/ffi_types/vector/tests.rs
index a0cd1e3bcc29..52794b80cd14 100644
--- a/src/script/src/python/ffi_types/vector/tests.rs
+++ b/src/script/src/python/ffi_types/vector/tests.rs
@@ -156,7 +156,7 @@ fn eval_pyo3(testcase: TestCase, locals: HashMap<String, PyVector>) {
fn eval_rspy(testcase: TestCase, locals: HashMap<String, PyVector>) {
vm::Interpreter::with_init(Default::default(), |vm| {
- PyVector::make_class(&vm.ctx);
+ let _ = PyVector::make_class(&vm.ctx);
})
.enter(|vm| {
let scope = vm.new_scope_with_builtins();
diff --git a/src/script/src/python/rspython/builtins.rs b/src/script/src/python/rspython/builtins.rs
index fc78c61c2ef0..e75426271fcd 100644
--- a/src/script/src/python/rspython/builtins.rs
+++ b/src/script/src/python/rspython/builtins.rs
@@ -998,11 +998,7 @@ pub(crate) mod greptime_builtin {
let windows = {
slices
.iter()
- .zip({
- let mut it = slices.iter();
- it.next();
- it
- })
+ .zip(slices.iter().skip(1))
.map(|(first, second)| {
let left = comparison::gt_eq_scalar(ts, *first).map_err(arrow_error)?;
let right = comparison::lt_eq_scalar(ts, *second).map_err(arrow_error)?;
diff --git a/src/script/src/python/rspython/builtins/test.rs b/src/script/src/python/rspython/builtins/test.rs
index e97f5061058e..3c6ce9c604cf 100644
--- a/src/script/src/python/rspython/builtins/test.rs
+++ b/src/script/src/python/rspython/builtins/test.rs
@@ -39,7 +39,7 @@ use crate::python::utils::format_py_error;
fn convert_scalar_to_py_obj_and_back() {
rustpython_vm::Interpreter::with_init(Default::default(), |vm| {
// this can be in `.enter()` closure, but for clearity, put it in the `with_init()`
- PyVector::make_class(&vm.ctx);
+ let _ = PyVector::make_class(&vm.ctx);
})
.enter(|vm| {
let col = DFColValue::Scalar(ScalarValue::Float64(Some(1.0)));
@@ -311,12 +311,11 @@ fn run_builtin_fn_testcases() {
let loc = loc.to_str().expect("Fail to parse path");
let mut file = File::open(loc).expect("Fail to open file");
let mut buf = String::new();
- file.read_to_string(&mut buf)
- .expect("Fail to read to string");
+ assert!(file.read_to_string(&mut buf).is_ok());
let testcases: Vec<TestCase> = from_ron_string(&buf).expect("Fail to convert to testcases");
let cached_vm = rustpython_vm::Interpreter::with_init(Default::default(), |vm| {
vm.add_native_module("greptime", Box::new(greptime_builtin::make_module));
- PyVector::make_class(&vm.ctx);
+ let _ = PyVector::make_class(&vm.ctx);
});
for (idx, case) in testcases.into_iter().enumerate() {
info!("Testcase {idx} ...");
@@ -423,7 +422,7 @@ fn test_vm() {
rustpython_vm::Interpreter::with_init(Default::default(), |vm| {
vm.add_native_module("udf_builtins", Box::new(greptime_builtin::make_module));
// this can be in `.enter()` closure, but for clearity, put it in the `with_init()`
- PyVector::make_class(&vm.ctx);
+ let _ = PyVector::make_class(&vm.ctx);
})
.enter(|vm| {
let values = vec![1.0, 2.0, 3.0];
diff --git a/src/script/src/python/rspython/copr_impl.rs b/src/script/src/python/rspython/copr_impl.rs
index 6f4c1792739c..e68906f31321 100644
--- a/src/script/src/python/rspython/copr_impl.rs
+++ b/src/script/src/python/rspython/copr_impl.rs
@@ -215,9 +215,9 @@ pub(crate) fn init_interpreter() -> Arc<Interpreter> {
// add this line for stdlib, so rustpython can found stdlib's python part in bytecode format
vm.add_frozen(rustpython_pylib::FROZEN_STDLIB);
// add our own custom datatype and module
- PyVector::make_class(&vm.ctx);
- PyQueryEngine::make_class(&vm.ctx);
- PyRecordBatch::make_class(&vm.ctx);
+ let _ = PyVector::make_class(&vm.ctx);
+ let _ = PyQueryEngine::make_class(&vm.ctx);
+ let _ = PyRecordBatch::make_class(&vm.ctx);
init_greptime_builtins("greptime", vm);
init_data_frame("data_frame", vm);
}));
diff --git a/src/script/src/python/rspython/dataframe_impl.rs b/src/script/src/python/rspython/dataframe_impl.rs
index deeabc030612..88d210c436f4 100644
--- a/src/script/src/python/rspython/dataframe_impl.rs
+++ b/src/script/src/python/rspython/dataframe_impl.rs
@@ -17,8 +17,8 @@ use rustpython_vm::{pymodule as rspymodule, VirtualMachine};
use crate::python::rspython::builtins::greptime_builtin::PyDataFrame;
pub(crate) fn init_data_frame(module_name: &str, vm: &mut VirtualMachine) {
- PyDataFrame::make_class(&vm.ctx);
- data_frame::PyExpr::make_class(&vm.ctx);
+ let _ = PyDataFrame::make_class(&vm.ctx);
+ let _ = data_frame::PyExpr::make_class(&vm.ctx);
vm.add_native_module(module_name.to_owned(), Box::new(data_frame::make_module));
}
/// with `register_batch`, and then wrap DataFrame API in it
diff --git a/src/script/src/python/rspython/test.rs b/src/script/src/python/rspython/test.rs
index 927765116841..e250a442e104 100644
--- a/src/script/src/python/rspython/test.rs
+++ b/src/script/src/python/rspython/test.rs
@@ -94,8 +94,7 @@ fn run_ron_testcases() {
let loc = loc.to_str().expect("Fail to parse path");
let mut file = File::open(loc).expect("Fail to open file");
let mut buf = String::new();
- file.read_to_string(&mut buf)
- .expect("Fail to read to string");
+ assert!(file.read_to_string(&mut buf).is_ok());
let testcases: Vec<TestCase> = from_ron_string(&buf).expect("Fail to convert to testcases");
info!("Read {} testcases from {}", testcases.len(), loc);
for testcase in testcases {
diff --git a/src/script/src/table.rs b/src/script/src/table.rs
index aa9cd83d13ec..fdbbe0674120 100644
--- a/src/script/src/table.rs
+++ b/src/script/src/table.rs
@@ -183,38 +183,36 @@ impl ScriptsTable {
}
pub async fn insert(&self, schema: &str, name: &str, script: &str) -> Result<()> {
- let mut columns_values: HashMap<String, VectorRef> = HashMap::with_capacity(8);
- columns_values.insert(
- "schema".to_string(),
- Arc::new(StringVector::from(vec![schema])) as _,
- );
- columns_values.insert(
- "name".to_string(),
- Arc::new(StringVector::from(vec![name])) as _,
- );
- columns_values.insert(
- "script".to_string(),
- Arc::new(StringVector::from(vec![script])) as _,
- );
- // TODO(dennis): we only supports python right now.
- columns_values.insert(
- "engine".to_string(),
- Arc::new(StringVector::from(vec!["python"])) as _,
- );
- // Timestamp in key part is intentionally left to 0
- columns_values.insert(
- "timestamp".to_string(),
- Arc::new(TimestampMillisecondVector::from_slice([0])) as _,
- );
let now = util::current_time_millis();
- columns_values.insert(
- "gmt_created".to_string(),
- Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
- );
- columns_values.insert(
- "gmt_modified".to_string(),
- Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
- );
+ let columns_values: HashMap<String, VectorRef> = HashMap::from([
+ (
+ "schema".to_string(),
+ Arc::new(StringVector::from(vec![schema])) as VectorRef,
+ ),
+ ("name".to_string(), Arc::new(StringVector::from(vec![name]))),
+ (
+ "script".to_string(),
+ Arc::new(StringVector::from(vec![script])) as VectorRef,
+ ),
+ (
+ "engine".to_string(),
+ // TODO(dennis): we only supports python right now.
+ Arc::new(StringVector::from(vec!["python"])) as VectorRef,
+ ),
+ (
+ "timestamp".to_string(),
+ // Timestamp in key part is intentionally left to 0
+ Arc::new(TimestampMillisecondVector::from_slice([0])) as VectorRef,
+ ),
+ (
+ "gmt_created".to_string(),
+ Arc::new(TimestampMillisecondVector::from_slice([now])) as VectorRef,
+ ),
+ (
+ "gmt_modified".to_string(),
+ Arc::new(TimestampMillisecondVector::from_slice([now])) as VectorRef,
+ ),
+ ]);
let table = self
.catalog_manager
.table(
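The `HashMap::from` initialization above replaces a sequence of `insert` calls whose returned `Option` was never used. A reduced sketch of the same construction, using simplified stand-ins for `VectorRef` and `StringVector` rather than the crate's real types:

```rust
use std::collections::HashMap;
use std::sync::Arc;

// Illustrative stand-ins for the sketch, not the crate's actual API.
trait Vector {
    fn num_rows(&self) -> usize;
}
type VectorRef = Arc<dyn Vector>;

struct StringVector(Vec<String>);
impl Vector for StringVector {
    fn num_rows(&self) -> usize {
        self.0.len()
    }
}

fn main() {
    // `HashMap::from` builds the whole map from an array of tuples; the
    // `as VectorRef` cast pins the value type so each `Arc<StringVector>`
    // coerces to the trait object.
    let columns: HashMap<String, VectorRef> = HashMap::from([
        (
            "schema".to_string(),
            Arc::new(StringVector(vec!["public".into()])) as VectorRef,
        ),
        (
            "name".to_string(),
            Arc::new(StringVector(vec!["scripts".into()])) as VectorRef,
        ),
    ]);
    assert_eq!(columns["schema"].num_rows(), 1);
}
```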
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 317953d9700c..84bead69a64b 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -449,11 +449,11 @@ impl From<Error> for tonic::Status {
// (which is a very rare case), just ignore. Client will use Tonic status code and message.
let status_code = err.status_code();
if let Ok(code) = HeaderValue::from_bytes(status_code.to_string().as_bytes()) {
- headers.insert(INNER_ERROR_CODE, code);
+ let _ = headers.insert(INNER_ERROR_CODE, code);
}
let root_error = err.iter_chain().last().unwrap();
if let Ok(err_msg) = HeaderValue::from_bytes(root_error.to_string().as_bytes()) {
- headers.insert(INNER_ERROR_MSG, err_msg);
+ let _ = headers.insert(INNER_ERROR_MSG, err_msg);
}
let metadata = MetadataMap::from_headers(headers);
diff --git a/src/servers/src/grpc.rs b/src/servers/src/grpc.rs
index 64cba0037874..f9d6e64902b5 100644
--- a/src/servers/src/grpc.rs
+++ b/src/servers/src/grpc.rs
@@ -188,7 +188,7 @@ impl Server for GrpcServer {
let mut serve_state = self.serve_state.lock().await;
*serve_state = Some(serve_state_rx);
- common_runtime::spawn_bg(async move {
+ let _handle = common_runtime::spawn_bg(async move {
let result = builder
.serve_with_incoming_shutdown(TcpListenerStream::new(listener), rx.map(drop))
.await
diff --git a/src/servers/src/grpc/handler.rs b/src/servers/src/grpc/handler.rs
index 777dc91588a1..7839a322b63d 100644
--- a/src/servers/src/grpc/handler.rs
+++ b/src/servers/src/grpc/handler.rs
@@ -114,7 +114,7 @@ impl GreptimeRequestHandler {
})
.context(NotFoundAuthHeaderSnafu)?;
- match auth_scheme {
+ let _ = match auth_scheme {
AuthScheme::Basic(Basic { username, password }) => user_provider
.auth(
Identity::UserId(&username, None),
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index c1b809e5e5a8..e6b6a3e1c792 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -397,42 +397,42 @@ impl HttpServerBuilder {
}
pub fn with_sql_handler(&mut self, handler: ServerSqlQueryHandlerRef) -> &mut Self {
- self.inner.sql_handler.get_or_insert(handler);
+ let _ = self.inner.sql_handler.get_or_insert(handler);
self
}
pub fn with_grpc_handler(&mut self, handler: ServerGrpcQueryHandlerRef) -> &mut Self {
- self.inner.grpc_handler.get_or_insert(handler);
+ let _ = self.inner.grpc_handler.get_or_insert(handler);
self
}
pub fn with_opentsdb_handler(&mut self, handler: OpentsdbProtocolHandlerRef) -> &mut Self {
- self.inner.opentsdb_handler.get_or_insert(handler);
+ let _ = self.inner.opentsdb_handler.get_or_insert(handler);
self
}
pub fn with_script_handler(&mut self, handler: ScriptHandlerRef) -> &mut Self {
- self.inner.script_handler.get_or_insert(handler);
+ let _ = self.inner.script_handler.get_or_insert(handler);
self
}
pub fn with_influxdb_handler(&mut self, handler: InfluxdbLineProtocolHandlerRef) -> &mut Self {
- self.inner.influxdb_handler.get_or_insert(handler);
+ let _ = self.inner.influxdb_handler.get_or_insert(handler);
self
}
pub fn with_prom_handler(&mut self, handler: PrometheusProtocolHandlerRef) -> &mut Self {
- self.inner.prom_handler.get_or_insert(handler);
+ let _ = self.inner.prom_handler.get_or_insert(handler);
self
}
pub fn with_user_provider(&mut self, user_provider: UserProviderRef) -> &mut Self {
- self.inner.user_provider.get_or_insert(user_provider);
+ let _ = self.inner.user_provider.get_or_insert(user_provider);
self
}
pub fn with_metrics_handler(&mut self, handler: MetricsHandler) -> &mut Self {
- self.inner.metrics_handler.get_or_insert(handler);
+ let _ = self.inner.metrics_handler.get_or_insert(handler);
self
}
diff --git a/src/servers/src/http/admin.rs b/src/servers/src/http/admin.rs
index 7125b89990b0..1a079ce568dc 100644
--- a/src/servers/src/http/admin.rs
+++ b/src/servers/src/http/admin.rs
@@ -62,6 +62,6 @@ pub async fn flush(
})),
});
- grpc_handler.do_query(request, QueryContext::arc()).await?;
+ let _ = grpc_handler.do_query(request, QueryContext::arc()).await?;
Ok((StatusCode::NO_CONTENT, ()))
}
diff --git a/src/servers/src/http/authorize.rs b/src/servers/src/http/authorize.rs
index 5879a1f2873d..f831fbf0e5d7 100644
--- a/src/servers/src/http/authorize.rs
+++ b/src/servers/src/http/authorize.rs
@@ -75,7 +75,7 @@ where
let user_provider = if let Some(user_provider) = user_provider.filter(|_| need_auth) {
user_provider
} else {
- request.extensions_mut().insert(UserInfo::default());
+ let _ = request.extensions_mut().insert(UserInfo::default());
return Ok(request);
};
@@ -119,7 +119,7 @@ where
.await
{
Ok(userinfo) => {
- request.extensions_mut().insert(userinfo);
+ let _ = request.extensions_mut().insert(userinfo);
Ok(request)
}
Err(e) => {
diff --git a/src/servers/src/metrics.rs b/src/servers/src/metrics.rs
index 6048d99322bb..ea0aeeadd78e 100644
--- a/src/servers/src/metrics.rs
+++ b/src/servers/src/metrics.rs
@@ -112,7 +112,7 @@ impl JemallocCollector {
}
pub(crate) fn update(&self) -> error::Result<()> {
- self.epoch.advance().context(UpdateJemallocMetricsSnafu)?;
+ let _ = self.epoch.advance().context(UpdateJemallocMetricsSnafu)?;
let allocated = self.allocated.read().context(UpdateJemallocMetricsSnafu)?;
let resident = self.resident.read().context(UpdateJemallocMetricsSnafu)?;
gauge!(METRIC_JEMALLOC_ALLOCATED, allocated as f64);
diff --git a/src/servers/src/mysql/handler.rs b/src/servers/src/mysql/handler.rs
index 6205829fcddd..1bef56a72734 100644
--- a/src/servers/src/mysql/handler.rs
+++ b/src/servers/src/mysql/handler.rs
@@ -138,7 +138,7 @@ impl MysqlInstanceShim {
fn save_plan(&self, plan: SqlPlan) -> u32 {
let stmt_id = self.prepared_stmts_counter.fetch_add(1, Ordering::Relaxed);
let mut prepared_stmts = self.prepared_stmts.write();
- prepared_stmts.insert(stmt_id, plan);
+ let _ = prepared_stmts.insert(stmt_id, plan);
stmt_id
}
@@ -317,7 +317,7 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
W: 'async_trait,
{
let mut guard = self.prepared_stmts.write();
- guard.remove(&stmt_id);
+ let _ = guard.remove(&stmt_id);
}
async fn on_query<'a>(
diff --git a/src/servers/src/mysql/helper.rs b/src/servers/src/mysql/helper.rs
index e734b821c280..8a86805b41bf 100644
--- a/src/servers/src/mysql/helper.rs
+++ b/src/servers/src/mysql/helper.rs
@@ -78,7 +78,7 @@ where
V: VisitMut,
{
let mut index = 1;
- visit_expressions_mut(v, |expr| {
+ let _ = visit_expressions_mut(v, |expr| {
if let Expr::Value(ValueExpr::Placeholder(s)) = expr {
*s = format_placeholder(index);
index += 1;
diff --git a/src/servers/src/mysql/server.rs b/src/servers/src/mysql/server.rs
index 29f3e550d22a..3a96ca2432be 100644
--- a/src/servers/src/mysql/server.rs
+++ b/src/servers/src/mysql/server.rs
@@ -155,7 +155,7 @@ impl MysqlServer {
spawn_config: Arc<MysqlSpawnConfig>,
) -> Result<()> {
info!("MySQL connection coming from: {}", stream.peer_addr()?);
- io_runtime.spawn(async move {
+ let _handle = io_runtime.spawn(async move {
increment_gauge!(crate::metrics::METRIC_MYSQL_CONNECTIONS, 1.0);
if let Err(e) = Self::do_handle(stream, spawn_ref, spawn_config).await {
// TODO(LFC): Write this error to client as well, in MySQL text protocol.
diff --git a/src/servers/src/opentsdb.rs b/src/servers/src/opentsdb.rs
index b6923e6b039d..93860fc45bb6 100644
--- a/src/servers/src/opentsdb.rs
+++ b/src/servers/src/opentsdb.rs
@@ -84,7 +84,7 @@ impl OpentsdbServer {
let connection = Connection::new(stream);
let mut handler = Handler::new(query_handler, connection, shutdown);
- io_runtime.spawn(async move {
+ let _handle = io_runtime.spawn(async move {
if let Err(e) = handler.run().await {
error!(e; "Unexpected error when handling OpenTSDB connection");
}
diff --git a/src/servers/src/opentsdb/connection.rs b/src/servers/src/opentsdb/connection.rs
index 1f3f0012fe74..4fa037d0cbeb 100644
--- a/src/servers/src/opentsdb/connection.rs
+++ b/src/servers/src/opentsdb/connection.rs
@@ -100,7 +100,8 @@ impl<S: AsyncWrite + AsyncRead + Unpin> Connection<S> {
.write_all(line.as_bytes())
.await
.context(error::InternalIoSnafu)?;
- self.stream
+ let _ = self
+ .stream
.write(b"\r\n")
.await
.context(error::InternalIoSnafu)?;
diff --git a/src/servers/src/opentsdb/handler.rs b/src/servers/src/opentsdb/handler.rs
index ded95c37a468..e374e84606d2 100644
--- a/src/servers/src/opentsdb/handler.rs
+++ b/src/servers/src/opentsdb/handler.rs
@@ -185,14 +185,14 @@ mod tests {
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let addr = listener.local_addr().unwrap();
- tokio::spawn(async move {
+ let _handle = tokio::spawn(async move {
loop {
let (stream, _) = listener.accept().await.unwrap();
let query_handler = query_handler.clone();
let connection = Connection::new(stream);
let shutdown = Shutdown::new(notify_shutdown.subscribe());
- tokio::spawn(async move {
+ let _handle = tokio::spawn(async move {
Handler::new(query_handler, connection, shutdown)
.run()
.await
diff --git a/src/servers/src/postgres.rs b/src/servers/src/postgres.rs
index c13b986d621e..6db0d1a93cb6 100644
--- a/src/servers/src/postgres.rs
+++ b/src/servers/src/postgres.rs
@@ -57,14 +57,13 @@ impl ServerParameterProvider for GreptimeDBStartupParameters {
where
C: ClientInfo,
{
- let mut params = HashMap::with_capacity(4);
- params.insert("server_version".to_owned(), self.version.to_owned());
- params.insert("server_encoding".to_owned(), "UTF8".to_owned());
- params.insert("client_encoding".to_owned(), "UTF8".to_owned());
- params.insert("DateStyle".to_owned(), "ISO YMD".to_owned());
- params.insert("integer_datetimes".to_owned(), "on".to_owned());
-
- Some(params)
+ Some(HashMap::from([
+ ("server_version".to_owned(), self.version.to_owned()),
+ ("server_encoding".to_owned(), "UTF8".to_owned()),
+ ("client_encoding".to_owned(), "UTF8".to_owned()),
+ ("DateStyle".to_owned(), "ISO YMD".to_owned()),
+ ("integer_datetimes".to_owned(), "on".to_owned()),
+ ]))
}
}
diff --git a/src/servers/src/postgres/auth_handler.rs b/src/servers/src/postgres/auth_handler.rs
index 3398d43f355e..29d730151a1b 100644
--- a/src/servers/src/postgres/auth_handler.rs
+++ b/src/servers/src/postgres/auth_handler.rs
@@ -155,12 +155,9 @@ impl StartupHandler for PostgresServerHandler {
// check if db is valid
match resolve_db_info(client, self.query_handler.clone()).await? {
DbResolution::Resolved(catalog, schema) => {
- client
- .metadata_mut()
- .insert(super::METADATA_CATALOG.to_owned(), catalog);
- client
- .metadata_mut()
- .insert(super::METADATA_SCHEMA.to_owned(), schema);
+ let metadata = client.metadata_mut();
+ let _ = metadata.insert(super::METADATA_CATALOG.to_owned(), catalog);
+ let _ = metadata.insert(super::METADATA_SCHEMA.to_owned(), schema);
}
DbResolution::NotFound(msg) => {
send_error(client, "FATAL", "3D000", msg).await?;
diff --git a/src/servers/src/postgres/server.rs b/src/servers/src/postgres/server.rs
index 106ad24e4164..8b6c36c95d15 100644
--- a/src/servers/src/postgres/server.rs
+++ b/src/servers/src/postgres/server.rs
@@ -86,7 +86,7 @@ impl PostgresServer {
Err(e) => warn!("Failed to get PostgreSQL client addr, err: {}", e),
}
- io_runtime.spawn(async move {
+ let _handle = io_runtime.spawn(async move {
increment_gauge!(crate::metrics::METRIC_POSTGRES_CONNECTIONS, 1.0);
let handler = Arc::new(handler);
let r = process_socket(
diff --git a/src/servers/src/prom.rs b/src/servers/src/prom.rs
index 3fcf2d246539..4b5b4da5dfb4 100644
--- a/src/servers/src/prom.rs
+++ b/src/servers/src/prom.rs
@@ -561,7 +561,7 @@ pub async fn labels_query(
let query_ctx = Arc::new(QueryContext::with(catalog, schema));
let mut labels = HashSet::new();
- labels.insert(METRIC_NAME.to_string());
+ let _ = labels.insert(METRIC_NAME.to_string());
for query in queries {
let prom_query = PromQuery {
@@ -585,8 +585,8 @@ pub async fn labels_query(
}
}
- labels.remove(TIMESTAMP_COLUMN_NAME);
- labels.remove(FIELD_COLUMN_NAME);
+ let _ = labels.remove(TIMESTAMP_COLUMN_NAME);
+ let _ = labels.remove(FIELD_COLUMN_NAME);
let mut sorted_labels: Vec<String> = labels.into_iter().collect();
sorted_labels.sort();
@@ -656,7 +656,7 @@ fn record_batches_to_series(
(column_name.to_string(), column.to_string())
})
.collect();
- element.insert("__name__".to_string(), table_name.to_string());
+ let _ = element.insert("__name__".to_string(), table_name.to_string());
series.push(element);
}
}
@@ -711,7 +711,7 @@ fn record_batches_to_labels_name(
// if a field is not null, record the tag name and return
names.iter().for_each(|name| {
- labels.insert(name.to_string());
+ let _ = labels.insert(name.to_string());
});
return Ok(());
}
@@ -858,7 +858,7 @@ async fn retrieve_label_values_from_record_batch(
.unwrap();
for row_index in 0..batch.num_rows() {
if let Some(label_value) = label_column.get_data(row_index) {
- labels_values.insert(label_value.to_string());
+ let _ = labels_values.insert(label_value.to_string());
}
}
}
diff --git a/src/servers/src/server.rs b/src/servers/src/server.rs
index e91fbe508582..69ee0918609a 100644
--- a/src/servers/src/server.rs
+++ b/src/servers/src/server.rs
@@ -112,7 +112,7 @@ impl AcceptTask {
err_msg: format!("{name} server has been started."),
}
);
- self.join_handle.get_or_insert(join_handle);
+ let _handle = self.join_handle.get_or_insert(join_handle);
Ok(())
}
}
diff --git a/src/servers/tests/grpc/mod.rs b/src/servers/tests/grpc/mod.rs
index 76edaee9f2ca..3b8ad1d627da 100644
--- a/src/servers/tests/grpc/mod.rs
+++ b/src/servers/tests/grpc/mod.rs
@@ -81,7 +81,7 @@ impl Server for MockGrpcServer {
let service = self.create_service();
// Would block to serve requests.
- tokio::spawn(async move {
+ let _handle = tokio::spawn(async move {
tonic::transport::Server::builder()
.add_service(service)
.serve_with_incoming(TcpListenerStream::new(listener))
diff --git a/src/servers/tests/http/http_handler_test.rs b/src/servers/tests/http/http_handler_test.rs
index 404df0ff757e..810104836ed9 100644
--- a/src/servers/tests/http/http_handler_test.rs
+++ b/src/servers/tests/http/http_handler_test.rs
@@ -270,7 +270,7 @@ def test(n, **params) -> vector[i64]:
insert_script(script.clone(), script_handler.clone(), sql_handler.clone()).await;
// Run the script
let mut exec = create_script_query();
- exec.0.params.insert("a".to_string(), "42".to_string());
+ let _ = exec.0.params.insert("a".to_string(), "42".to_string());
let Json(json) = script_handler::run_script(
State(ApiState {
sql_handler,
diff --git a/src/servers/tests/http/influxdb_test.rs b/src/servers/tests/http/influxdb_test.rs
index d7a92543afab..bba989d4e4ee 100644
--- a/src/servers/tests/http/influxdb_test.rs
+++ b/src/servers/tests/http/influxdb_test.rs
@@ -108,9 +108,6 @@ fn make_test_app(tx: Arc<mpsc::Sender<(String, String)>>, db_name: Option<&str>)
};
let instance = Arc::new(DummyInstance { tx });
- let mut server_builder = HttpServerBuilder::new(http_opts);
- server_builder.with_sql_handler(instance.clone());
- server_builder.with_grpc_handler(instance.clone());
let mut user_provider = MockUserProvider::default();
if let Some(name) = db_name {
user_provider.set_authorization_info(DatabaseAuthInfo {
@@ -119,10 +116,12 @@ fn make_test_app(tx: Arc<mpsc::Sender<(String, String)>>, db_name: Option<&str>)
username: "greptime",
})
}
- server_builder.with_user_provider(Arc::new(user_provider));
-
- server_builder.with_influxdb_handler(instance);
- let server = server_builder.build();
+ let server = HttpServerBuilder::new(http_opts)
+ .with_sql_handler(instance.clone())
+ .with_grpc_handler(instance.clone())
+ .with_user_provider(Arc::new(user_provider))
+ .with_influxdb_handler(instance)
+ .build();
server.build(server.make_app())
}
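The test helper above now chains the builder's `with_*` setters directly into `build()` instead of mutating a named builder step by step. A self-contained sketch of that `&mut Self`-returning builder style, with hypothetical option names:

```rust
#[derive(Default, Clone)]
struct ServerOptions {
    addr: Option<String>,
    enable_metrics: bool,
}

#[derive(Default)]
struct ServerBuilder {
    opts: ServerOptions,
}

impl ServerBuilder {
    fn new() -> Self {
        Self::default()
    }

    // Each setter returns `&mut Self`, so calls chain on the temporary
    // returned by `new()` without an intermediate binding.
    fn with_addr(&mut self, addr: &str) -> &mut Self {
        let _ = self.opts.addr.get_or_insert(addr.to_string());
        self
    }

    fn with_metrics(&mut self, enabled: bool) -> &mut Self {
        self.opts.enable_metrics = enabled;
        self
    }

    fn build(&self) -> ServerOptions {
        self.opts.clone()
    }
}

fn main() {
    let opts = ServerBuilder::new()
        .with_addr("127.0.0.1:4000")
        .with_metrics(true)
        .build();
    assert_eq!(opts.addr.as_deref(), Some("127.0.0.1:4000"));
}
```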
diff --git a/src/servers/tests/mod.rs b/src/servers/tests/mod.rs
index ea3f9aa4ccd4..91be2ae62701 100644
--- a/src/servers/tests/mod.rs
+++ b/src/servers/tests/mod.rs
@@ -124,7 +124,8 @@ impl ScriptHandler for DummyInstance {
.await
.unwrap();
script.register_udf().await;
- self.scripts
+ let _ = self
+ .scripts
.write()
.unwrap()
.insert(format!("{schema}_{name}"), Arc::new(script));
diff --git a/src/servers/tests/opentsdb.rs b/src/servers/tests/opentsdb.rs
index 69a90d08ca6f..ccfc4bc86d3e 100644
--- a/src/servers/tests/opentsdb.rs
+++ b/src/servers/tests/opentsdb.rs
@@ -204,7 +204,7 @@ async fn test_opentsdb_connect_after_shutdown() -> Result<()> {
server.shutdown().await.unwrap();
- TcpStream::connect(addr).await.unwrap_err();
+ assert!(TcpStream::connect(addr).await.is_err());
Ok(())
}
diff --git a/src/servers/tests/postgres/mod.rs b/src/servers/tests/postgres/mod.rs
index 523c1ca29276..bb13c40a46c3 100644
--- a/src/servers/tests/postgres/mod.rs
+++ b/src/servers/tests/postgres/mod.rs
@@ -399,7 +399,7 @@ async fn create_secure_connection(
let tls = tokio_postgres_rustls::MakeRustlsConnect::new(config);
let (client, conn) = tokio_postgres::connect(&url, tls).await.expect("connect");
- tokio::spawn(conn);
+ let _handle = tokio::spawn(conn);
Ok(client)
}
@@ -415,7 +415,7 @@ async fn create_plain_connection(
format!("host=127.0.0.1 port={port} connect_timeout=2 dbname={DEFAULT_SCHEMA_NAME}")
};
let (client, conn) = tokio_postgres::connect(&url, NoTls).await?;
- tokio::spawn(conn);
+ let _handle = tokio::spawn(conn);
Ok(client)
}
@@ -425,7 +425,7 @@ async fn create_connection_with_given_db(
) -> std::result::Result<Client, PgError> {
let url = format!("host=127.0.0.1 port={port} connect_timeout=2 dbname={db}");
let (client, conn) = tokio_postgres::connect(&url, NoTls).await?;
- tokio::spawn(conn);
+ let _handle = tokio::spawn(conn);
Ok(client)
}
@@ -436,14 +436,14 @@ async fn create_connection_with_given_catalog_schema(
) -> std::result::Result<Client, PgError> {
let url = format!("host=127.0.0.1 port={port} connect_timeout=2 dbname={catalog}-{schema}");
let (client, conn) = tokio_postgres::connect(&url, NoTls).await?;
- tokio::spawn(conn);
+ let _handle = tokio::spawn(conn);
Ok(client)
}
async fn create_connection_without_db(port: u16) -> std::result::Result<Client, PgError> {
let url = format!("host=127.0.0.1 port={port} connect_timeout=2");
let (client, conn) = tokio_postgres::connect(&url, NoTls).await?;
- tokio::spawn(conn);
+ let _handle = tokio::spawn(conn);
Ok(client)
}
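The `let _handle = tokio::spawn(...)` bindings in these tests mark detached tasks explicitly: dropping a `JoinHandle` does not cancel the task. A small sketch, assuming `tokio` with the `macros` and `rt-multi-thread` features enabled:

```rust
#[tokio::main]
async fn main() {
    // Fire-and-forget: the handle is intentionally unused, and the task
    // keeps running even though `_handle` is dropped.
    let _handle = tokio::spawn(async {
        // background work nobody awaits
    });

    // When the result matters, keep the handle and await it.
    let handle = tokio::spawn(async { 2 + 2 });
    assert_eq!(handle.await.unwrap(), 4);
}
```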
diff --git a/src/session/src/context.rs b/src/session/src/context.rs
index 1a9f38d2d27b..ade230f889be 100644
--- a/src/session/src/context.rs
+++ b/src/session/src/context.rs
@@ -130,7 +130,7 @@ impl QueryContext {
#[inline]
pub fn set_time_zone(&self, tz: Option<TimeZone>) {
- self.time_zone.swap(Arc::new(tz));
+ let _ = self.time_zone.swap(Arc::new(tz));
}
}
diff --git a/src/sql/src/parser.rs b/src/sql/src/parser.rs
index e709cddbf8c9..eb8fc4ac3092 100644
--- a/src/sql/src/parser.rs
+++ b/src/sql/src/parser.rs
@@ -82,24 +82,24 @@ impl<'a> ParserContext<'a> {
Token::Word(w) => {
match w.keyword {
Keyword::CREATE => {
- self.parser.next_token();
+ let _ = self.parser.next_token();
self.parse_create()
}
Keyword::EXPLAIN => {
- self.parser.next_token();
+ let _ = self.parser.next_token();
self.parse_explain()
}
Keyword::SHOW => {
- self.parser.next_token();
+ let _ = self.parser.next_token();
self.parse_show()
}
Keyword::DELETE => self.parse_delete(),
Keyword::DESCRIBE | Keyword::DESC => {
- self.parser.next_token();
+ let _ = self.parser.next_token();
self.parse_describe()
}
@@ -112,7 +112,7 @@ impl<'a> ParserContext<'a> {
Keyword::DROP => self.parse_drop(),
Keyword::USE => {
- self.parser.next_token();
+ let _ = self.parser.next_token();
let database_name =
self.parser
@@ -157,7 +157,7 @@ impl<'a> ParserContext<'a> {
if self.consume_token("DATABASES") || self.consume_token("SCHEMAS") {
self.parse_show_databases()
} else if self.matches_keyword(Keyword::TABLES) {
- self.parser.next_token();
+ let _ = self.parser.next_token();
self.parse_show_tables()
} else if self.consume_token("CREATE") {
if self.consume_token("TABLE") {
@@ -201,7 +201,7 @@ impl<'a> ParserContext<'a> {
// SHOW TABLES [in | FROM] [DATABASE]
Token::Word(w) => match w.keyword {
Keyword::IN | Keyword::FROM => {
- self.parser.next_token();
+ let _ = self.parser.next_token();
let db_name = self.parser.parse_object_name().with_context(|_| {
error::UnexpectedSnafu {
sql: self.sql,
@@ -230,7 +230,7 @@ impl<'a> ParserContext<'a> {
// SHOW TABLES [WHERE | LIKE] [EXPR]
Token::Word(w) => match w.keyword {
Keyword::LIKE => {
- self.parser.next_token();
+ let _ = self.parser.next_token();
ShowKind::Like(self.parser.parse_identifier().with_context(|_| {
error::UnexpectedSnafu {
sql: self.sql,
@@ -240,7 +240,7 @@ impl<'a> ParserContext<'a> {
})?)
}
Keyword::WHERE => {
- self.parser.next_token();
+ let _ = self.parser.next_token();
ShowKind::Where(self.parser.parse_expr().with_context(|_| {
error::UnexpectedSnafu {
sql: self.sql,
@@ -260,7 +260,7 @@ impl<'a> ParserContext<'a> {
/// Parses DESCRIBE statements
fn parse_describe(&mut self) -> Result<Statement> {
if self.matches_keyword(Keyword::TABLE) {
- self.parser.next_token();
+ let _ = self.parser.next_token();
self.parse_describe_table()
} else {
self.unsupported(self.peek_token_as_string())
@@ -299,11 +299,11 @@ impl<'a> ParserContext<'a> {
}
fn parse_drop(&mut self) -> Result<Statement> {
- self.parser.next_token();
+ let _ = self.parser.next_token();
if !self.matches_keyword(Keyword::TABLE) {
return self.unsupported(self.peek_token_as_string());
}
- self.parser.next_token();
+ let _ = self.parser.next_token();
let table_ident =
self.parser
@@ -340,7 +340,7 @@ impl<'a> ParserContext<'a> {
pub fn consume_token(&mut self, expected: &str) -> bool {
if self.peek_token_as_string().to_uppercase() == *expected.to_uppercase() {
- self.parser.next_token();
+ let _ = self.parser.next_token();
true
} else {
false
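The repeated `let _ = self.parser.next_token();` lines discard a `#[must_use]` token when the parser only needs to advance past a keyword. A toy token stream showing the same idea (not sqlparser's actual API):

```rust
#[derive(Debug, Clone, PartialEq)]
enum Token {
    Word(String),
    Eof,
}

struct TokenStream {
    tokens: Vec<Token>,
    pos: usize,
}

impl TokenStream {
    fn new(tokens: Vec<Token>) -> Self {
        Self { tokens, pos: 0 }
    }

    fn peek_token(&self) -> Token {
        self.tokens.get(self.pos).cloned().unwrap_or(Token::Eof)
    }

    #[must_use]
    fn next_token(&mut self) -> Token {
        let tok = self.peek_token();
        self.pos += 1;
        tok
    }
}

fn main() {
    let mut stream = TokenStream::new(vec![
        Token::Word("SHOW".into()),
        Token::Word("TABLES".into()),
    ]);
    if stream.peek_token() == Token::Word("SHOW".into()) {
        // Advance past the keyword; the returned token is intentionally
        // unused, and `let _ =` makes that explicit under `#[must_use]`.
        let _ = stream.next_token();
    }
    assert_eq!(stream.peek_token(), Token::Word("TABLES".into()));
}
```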
diff --git a/src/sql/src/parsers/alter_parser.rs b/src/sql/src/parsers/alter_parser.rs
index 2448f383f1e1..e5ffb958669b 100644
--- a/src/sql/src/parsers/alter_parser.rs
+++ b/src/sql/src/parsers/alter_parser.rs
@@ -47,7 +47,7 @@ impl<'a> ParserContext<'a> {
Some(AddColumnLocation::First)
} else if let Token::Word(word) = parser.peek_token().token {
if word.value.to_ascii_uppercase() == "AFTER" {
- parser.next_token();
+ let _ = parser.next_token();
let name = parser.parse_identifier()?;
Some(AddColumnLocation::After {
column_name: name.value,
diff --git a/src/sql/src/parsers/copy_parser.rs b/src/sql/src/parsers/copy_parser.rs
index 1bad3a3f7fc1..8d6bbd1541ac 100644
--- a/src/sql/src/parsers/copy_parser.rs
+++ b/src/sql/src/parsers/copy_parser.rs
@@ -31,10 +31,10 @@ pub type Connection = HashMap<String, String>;
// COPY tbl TO 'output.parquet';
impl<'a> ParserContext<'a> {
pub(crate) fn parse_copy(&mut self) -> Result<Statement> {
- self.parser.next_token();
+ let _ = self.parser.next_token();
let next = self.parser.peek_token();
let copy = if let Word(word) = next.token && word.keyword == Keyword::DATABASE {
- self.parser.next_token();
+ let _ = self.parser.next_token();
let copy_database = self.parser_copy_database()?;
crate::statements::copy::Copy::CopyDatabase(copy_database)
} else {
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index 3daec82d6352..c54eff9d9429 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -61,7 +61,7 @@ impl<'a> ParserContext<'a> {
}
fn parse_create_external_table(&mut self) -> Result<Statement> {
- self.parser.next_token();
+ let _ = self.parser.next_token();
self.parser
.expect_keyword(Keyword::TABLE)
.context(error::SyntaxSnafu { sql: self.sql })?;
@@ -103,7 +103,7 @@ impl<'a> ParserContext<'a> {
}
fn parse_create_database(&mut self) -> Result<Statement> {
- self.parser.next_token();
+ let _ = self.parser.next_token();
let if_not_exists =
self.parser
@@ -125,7 +125,7 @@ impl<'a> ParserContext<'a> {
}
fn parse_create_table(&mut self) -> Result<Statement> {
- self.parser.next_token();
+ let _ = self.parser.next_token();
let if_not_exists =
self.parser
.parse_keywords(&[Keyword::IF, Keyword::NOT, Keyword::EXISTS]);
@@ -377,7 +377,7 @@ impl<'a> ParserContext<'a> {
column.options.push(not_null_opt);
}
- column.options.remove(index);
+ let _ = column.options.remove(index);
}
columns.push(column);
diff --git a/src/sql/src/parsers/delete_parser.rs b/src/sql/src/parsers/delete_parser.rs
index e48f3df61ab0..555f2c6265f4 100644
--- a/src/sql/src/parsers/delete_parser.rs
+++ b/src/sql/src/parsers/delete_parser.rs
@@ -23,7 +23,7 @@ use crate::statements::statement::Statement;
/// DELETE statement parser implementation
impl<'a> ParserContext<'a> {
pub(crate) fn parse_delete(&mut self) -> Result<Statement> {
- self.parser.next_token();
+ let _ = self.parser.next_token();
let spstatement = self
.parser
.parse_delete()
diff --git a/src/sql/src/parsers/insert_parser.rs b/src/sql/src/parsers/insert_parser.rs
index 6f035a72187c..a5a24d402a43 100644
--- a/src/sql/src/parsers/insert_parser.rs
+++ b/src/sql/src/parsers/insert_parser.rs
@@ -23,7 +23,7 @@ use crate::statements::statement::Statement;
/// INSERT statement parser implementation
impl<'a> ParserContext<'a> {
pub(crate) fn parse_insert(&mut self) -> Result<Statement> {
- self.parser.next_token();
+ let _ = self.parser.next_token();
let spstatement = self
.parser
.parse_insert()
diff --git a/src/sql/src/parsers/tql_parser.rs b/src/sql/src/parsers/tql_parser.rs
index 5a1e9b9afeae..d77f7d926d9d 100644
--- a/src/sql/src/parsers/tql_parser.rs
+++ b/src/sql/src/parsers/tql_parser.rs
@@ -34,7 +34,7 @@ use sqlparser::parser::Parser;
/// - TQL ANALYZE <query>
impl<'a> ParserContext<'a> {
pub(crate) fn parse_tql(&mut self) -> Result<Statement> {
- self.parser.next_token();
+ let _ = self.parser.next_token();
match self.parser.peek_token().token {
Token::Word(w) => {
@@ -44,18 +44,18 @@ impl<'a> ParserContext<'a> {
if (uppercase == EVAL || uppercase == EVALUATE)
&& w.quote_style.is_none() =>
{
- self.parser.next_token();
+ let _ = self.parser.next_token();
self.parse_tql_eval()
.context(error::SyntaxSnafu { sql: self.sql })
}
Keyword::EXPLAIN => {
- self.parser.next_token();
+ let _ = self.parser.next_token();
self.parse_tql_explain()
}
Keyword::ANALYZE => {
- self.parser.next_token();
+ let _ = self.parser.next_token();
self.parse_tql_analyze()
.context(error::SyntaxSnafu { sql: self.sql })
}
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index a93b004658e6..bd88c268ab13 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -298,7 +298,7 @@ pub fn column_def_to_schema(column_def: &ColumnDef, is_time_index: bool) -> Resu
None
}
}) {
- column_schema
+ let _ = column_schema
.mut_metadata()
.insert(COMMENT_KEY.to_string(), c.to_string());
}
diff --git a/src/sql/src/statements/describe.rs b/src/sql/src/statements/describe.rs
index 5bc5b4fb0632..006dd83986a9 100644
--- a/src/sql/src/statements/describe.rs
+++ b/src/sql/src/statements/describe.rs
@@ -93,6 +93,6 @@ mod tests {
#[test]
pub fn test_describe_missing_table_name() {
let sql = "DESCRIBE TABLE";
- ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}).unwrap_err();
+ assert!(ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}).is_err());
}
}
diff --git a/src/sql/src/statements/show.rs b/src/sql/src/statements/show.rs
index ef36770ac9ce..409398227960 100644
--- a/src/sql/src/statements/show.rs
+++ b/src/sql/src/statements/show.rs
@@ -135,6 +135,6 @@ mod tests {
#[test]
pub fn test_show_create_missing_table_name() {
let sql = "SHOW CREATE TABLE";
- ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}).unwrap_err();
+ assert!(ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}).is_err());
}
}
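The assertion changes above swap bare `.unwrap_err()` calls, whose returned error value went unused, for explicit `assert!(... .is_err())` checks. A minimal test-module sketch of that style, built around a hypothetical `parse_number` helper:

```rust
fn parse_number(input: &str) -> Result<i64, std::num::ParseIntError> {
    input.trim().parse()
}

#[cfg(test)]
mod tests {
    use super::parse_number;

    #[test]
    fn rejects_garbage() {
        // Checking the Result directly replaces a bare `.unwrap_err()`
        // that was called only for its panic-on-Ok side effect.
        assert!(parse_number("not a number").is_err());
    }

    #[test]
    fn accepts_integers() {
        assert!(parse_number(" 42 ").is_ok());
    }
}
```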
diff --git a/src/sql/src/util.rs b/src/sql/src/util.rs
index b27abf79c5f1..0ad23221a365 100644
--- a/src/sql/src/util.rs
+++ b/src/sql/src/util.rs
@@ -32,7 +32,7 @@ pub fn to_lowercase_options_map(opts: &[SqlOption]) -> HashMap<String, String> {
Value::SingleQuotedString(s) | Value::DoubleQuotedString(s) => s.clone(),
_ => value.to_string(),
};
- map.insert(name.value.to_lowercase().clone(), value_str);
+ let _ = map.insert(name.value.to_lowercase().clone(), value_str);
}
map
}
diff --git a/src/storage/benches/memtable/bench_memtable_read.rs b/src/storage/benches/memtable/bench_memtable_read.rs
index ad73708afe1d..b3d3e24f9433 100644
--- a/src/storage/benches/memtable/bench_memtable_read.rs
+++ b/src/storage/benches/memtable/bench_memtable_read.rs
@@ -23,8 +23,9 @@ fn bench_memtable_read(c: &mut Criterion) {
let ctx = BenchContext::new();
kvs.iter().for_each(|kv| ctx.write(kv));
let mut group = c.benchmark_group("memtable_read");
- group.throughput(Throughput::Elements(10 * 10000));
- group.bench_function("read", |b| b.iter(|| ctx.read(100)));
+ let _ = group
+ .throughput(Throughput::Elements(10 * 10000))
+ .bench_function("read", |b| b.iter(|| ctx.read(100)));
group.finish();
}
diff --git a/src/storage/benches/memtable/bench_memtable_read_write_ratio.rs b/src/storage/benches/memtable/bench_memtable_read_write_ratio.rs
index eda89e13e51b..a8b7b55c8fc1 100644
--- a/src/storage/benches/memtable/bench_memtable_read_write_ratio.rs
+++ b/src/storage/benches/memtable/bench_memtable_read_write_ratio.rs
@@ -42,11 +42,11 @@ fn memtable_round(ctx: &BenchContext, input: &Input) {
let now = Instant::now();
let read_count = ctx.read(input.batch_size);
let d = now.elapsed();
- READ_SECS.fetch_add(
+ let _ = READ_SECS.fetch_add(
d.as_secs() as f64 + d.subsec_nanos() as f64 * 1e-9,
Ordering::Relaxed,
);
- READ_NUM.fetch_add(read_count, Ordering::Relaxed);
+ let _ = READ_NUM.fetch_add(read_count, Ordering::Relaxed);
} else {
generate_kvs(input.kv_size, input.batch_size, 20)
.iter()
@@ -54,11 +54,11 @@ fn memtable_round(ctx: &BenchContext, input: &Input) {
let now = Instant::now();
ctx.write(kv);
let d = now.elapsed();
- WRITE_SECS.fetch_add(
+ let _ = WRITE_SECS.fetch_add(
d.as_secs() as f64 + d.subsec_nanos() as f64 * 1e-9,
Ordering::Relaxed,
);
- WRITE_NUM.fetch_add(kv.len(), Ordering::Relaxed);
+ let _ = WRITE_NUM.fetch_add(kv.len(), Ordering::Relaxed);
});
}
}
@@ -111,16 +111,17 @@ fn bench_memtable_read_write_ratio(c: &mut Criterion) {
READ_SECS.store(0.0, Ordering::Relaxed);
WRITE_SECS.store(0.0, Ordering::Relaxed);
- group.bench_with_input(
- BenchmarkId::from_parameter(format!(
- "read ratio: {:.2}% , write ratio: {:.2}%",
- i as f64 / 10_f64 * 100.0,
- (10 - i) as f64 / 10_f64 * 100.0,
- )),
- &i,
- bench_read_write_ctx_frac,
- );
- group.throughput(Throughput::Elements(100 * 1000));
+ let _ = group
+ .bench_with_input(
+ BenchmarkId::from_parameter(format!(
+ "read ratio: {:.2}% , write ratio: {:.2}%",
+ i as f64 / 10_f64 * 100.0,
+ (10 - i) as f64 / 10_f64 * 100.0,
+ )),
+ &i,
+ bench_read_write_ctx_frac,
+ )
+ .throughput(Throughput::Elements(100 * 1000));
// the time is a little different the real time
let read_num = READ_NUM.load(Ordering::Relaxed);
diff --git a/src/storage/benches/memtable/bench_memtable_write.rs b/src/storage/benches/memtable/bench_memtable_write.rs
index 114cb9677dab..ba2a747f069e 100644
--- a/src/storage/benches/memtable/bench_memtable_write.rs
+++ b/src/storage/benches/memtable/bench_memtable_write.rs
@@ -21,11 +21,12 @@ pub fn bench_memtable_write(c: &mut Criterion) {
// the length of string in value is 20
let kvs = generate_kvs(10, 1000, 20);
let mut group = c.benchmark_group("memtable_write");
- group.throughput(Throughput::Elements(10 * 1000));
- group.bench_function("write", |b| {
- let ctx = BenchContext::new();
- b.iter(|| kvs.iter().for_each(|kv| ctx.write(kv)))
- });
+ let _ = group
+ .throughput(Throughput::Elements(10 * 1000))
+ .bench_function("write", |b| {
+ let ctx = BenchContext::new();
+ b.iter(|| kvs.iter().for_each(|kv| ctx.write(kv)))
+ });
group.finish();
}
diff --git a/src/storage/benches/memtable/util/bench_context.rs b/src/storage/benches/memtable/util/bench_context.rs
index 866269aa92f9..0345ab35088b 100644
--- a/src/storage/benches/memtable/util/bench_context.rs
+++ b/src/storage/benches/memtable/util/bench_context.rs
@@ -43,7 +43,7 @@ impl BenchContext {
};
let iter = self.memtable.iter(iter_ctx).unwrap();
for batch in iter {
- batch.unwrap();
+ let _ = batch.unwrap();
read_count += batch_size;
}
read_count
diff --git a/src/storage/benches/wal/bench_decode.rs b/src/storage/benches/wal/bench_decode.rs
index 9af1e624efcd..e55493bd0289 100644
--- a/src/storage/benches/wal/bench_decode.rs
+++ b/src/storage/benches/wal/bench_decode.rs
@@ -58,15 +58,16 @@ fn bench_wal_decode(c: &mut Criterion) {
encode_arrow(&batch_10000, &mut dst_arrow_10000);
let mut group = c.benchmark_group("wal_decode");
- group.bench_function("arrow_decode_with_10_num_rows", |b| {
- b.iter(|| decode_arrow(&dst_arrow_10, &types_10))
- });
- group.bench_function("arrow_decode_with_100_num_rows", |b| {
- b.iter(|| decode_arrow(&dst_arrow_100, &types_100))
- });
- group.bench_function("arrow_decode_with_10000_num_rows", |b| {
- b.iter(|| decode_arrow(&dst_arrow_10000, &types_10000))
- });
+ let _ = group
+ .bench_function("arrow_decode_with_10_num_rows", |b| {
+ b.iter(|| decode_arrow(&dst_arrow_10, &types_10))
+ })
+ .bench_function("arrow_decode_with_100_num_rows", |b| {
+ b.iter(|| decode_arrow(&dst_arrow_100, &types_100))
+ })
+ .bench_function("arrow_decode_with_10000_num_rows", |b| {
+ b.iter(|| decode_arrow(&dst_arrow_10000, &types_10000))
+ });
group.finish();
}
diff --git a/src/storage/benches/wal/bench_encode.rs b/src/storage/benches/wal/bench_encode.rs
index 4cc8ca5f2554..25748a1941b2 100644
--- a/src/storage/benches/wal/bench_encode.rs
+++ b/src/storage/benches/wal/bench_encode.rs
@@ -45,15 +45,16 @@ fn bench_wal_encode(c: &mut Criterion) {
let (batch_10000, _) = gen_new_batch_and_types(100);
let mut group = c.benchmark_group("wal_encode");
- group.bench_function("arrow_encode_with_10_num_rows", |b| {
- b.iter(|| encode_arrow(&batch_10))
- });
- group.bench_function("arrow_encode_with_100_num_rows", |b| {
- b.iter(|| encode_arrow(&batch_100))
- });
- group.bench_function("arrow_encode_with_10000_num_rows", |b| {
- b.iter(|| encode_arrow(&batch_10000))
- });
+ let _ = group
+ .bench_function("arrow_encode_with_10_num_rows", |b| {
+ b.iter(|| encode_arrow(&batch_10))
+ })
+ .bench_function("arrow_encode_with_100_num_rows", |b| {
+ b.iter(|| encode_arrow(&batch_100))
+ })
+ .bench_function("arrow_encode_with_10000_num_rows", |b| {
+ b.iter(|| encode_arrow(&batch_10000))
+ });
group.finish();
}
diff --git a/src/storage/benches/wal/bench_wal.rs b/src/storage/benches/wal/bench_wal.rs
index a7762c6c2cae..cbe4a695578e 100644
--- a/src/storage/benches/wal/bench_wal.rs
+++ b/src/storage/benches/wal/bench_wal.rs
@@ -49,15 +49,16 @@ fn bench_wal_encode_decode(c: &mut Criterion) {
let (batch_10000, types_10000) = gen_new_batch_and_types(100);
let mut group = c.benchmark_group("wal_encode_decode");
- group.bench_function("arrow_encode_decode_with_10_num_rows", |b| {
- b.iter(|| codec_arrow(&batch_10, &types_10))
- });
- group.bench_function("arrow_encode_decode_with_100_num_rows", |b| {
- b.iter(|| codec_arrow(&batch_100, &types_100))
- });
- group.bench_function("arrow_encode_decode_with_10000_num_rows", |b| {
- b.iter(|| codec_arrow(&batch_10000, &types_10000))
- });
+ let _ = group
+ .bench_function("arrow_encode_decode_with_10_num_rows", |b| {
+ b.iter(|| codec_arrow(&batch_10, &types_10))
+ })
+ .bench_function("arrow_encode_decode_with_100_num_rows", |b| {
+ b.iter(|| codec_arrow(&batch_100, &types_100))
+ })
+ .bench_function("arrow_encode_decode_with_10000_num_rows", |b| {
+ b.iter(|| codec_arrow(&batch_10000, &types_10000))
+ });
group.finish();
}
diff --git a/src/storage/benches/wal/util/mod.rs b/src/storage/benches/wal/util/mod.rs
index b51dad9d33bb..9328172729c8 100644
--- a/src/storage/benches/wal/util/mod.rs
+++ b/src/storage/benches/wal/util/mod.rs
@@ -75,17 +75,18 @@ pub fn gen_new_batch_and_types(putdate_nums: usize) -> (WriteBatch, Vec<i32>) {
let tsv = Arc::new(TimestampMillisecondVector::from_values(tsvs)) as VectorRef;
let fvs = Arc::new(Float64Vector::from_slice(fvs)) as VectorRef;
let svs = Arc::new(StringVector::from_slice(&svs)) as VectorRef;
- let mut put_data = HashMap::with_capacity(11);
- put_data.insert("k1".to_string(), intv.clone());
- put_data.insert("v1".to_string(), boolv);
- put_data.insert("ts".to_string(), tsv.clone());
- put_data.insert("4".to_string(), fvs.clone());
- put_data.insert("5".to_string(), fvs.clone());
- put_data.insert("6".to_string(), fvs.clone());
- put_data.insert("7".to_string(), fvs.clone());
- put_data.insert("8".to_string(), fvs.clone());
- put_data.insert("9".to_string(), fvs.clone());
- put_data.insert("10".to_string(), svs.clone());
+ let put_data = HashMap::from([
+ ("k1".to_string(), intv.clone()),
+ ("v1".to_string(), boolv),
+ ("ts".to_string(), tsv.clone()),
+ ("4".to_string(), fvs.clone()),
+ ("5".to_string(), fvs.clone()),
+ ("6".to_string(), fvs.clone()),
+ ("7".to_string(), fvs.clone()),
+ ("8".to_string(), fvs.clone()),
+ ("9".to_string(), fvs),
+ ("10".to_string(), svs),
+ ]);
batch.put(put_data).unwrap();
}
let types = proto::wal::gen_mutation_types(batch.payload());
diff --git a/src/storage/src/compaction/writer.rs b/src/storage/src/compaction/writer.rs
index 9bd6f87a3417..0f9bf766a42a 100644
--- a/src/storage/src/compaction/writer.rs
+++ b/src/storage/src/compaction/writer.rs
@@ -214,7 +214,7 @@ mod tests {
&ts_seg, // keys
&value_seg, // values
);
- seq.fetch_add(1, Ordering::Relaxed);
+ let _ = seq.fetch_add(1, Ordering::Relaxed);
}
let iter = memtable.iter(IterContext::default()).unwrap();
@@ -241,7 +241,7 @@ mod tests {
Arc::new(crate::test_util::access_layer_util::MockAccessLayer {}),
new_noop_file_purger(),
);
- seq.fetch_add(1, Ordering::Relaxed);
+ let _ = seq.fetch_add(1, Ordering::Relaxed);
handle
}
@@ -279,7 +279,7 @@ mod tests {
let dir = create_temp_dir("write_parquet");
let path = dir.path().to_str().unwrap();
let mut builder = Fs::default();
- builder.root(path);
+ let _ = builder.root(path);
let object_store = ObjectStore::new(builder).unwrap().finish();
@@ -358,7 +358,7 @@ mod tests {
let dir = create_temp_dir("write_parquet");
let path = dir.path().to_str().unwrap();
let mut builder = Fs::default();
- builder.root(path);
+ let _ = builder.root(path);
let object_store = ObjectStore::new(builder).unwrap().finish();
let schema = schema_for_test();
diff --git a/src/storage/src/engine.rs b/src/storage/src/engine.rs
index 349433280945..5f15d03d800f 100644
--- a/src/storage/src/engine.rs
+++ b/src/storage/src/engine.rs
@@ -259,7 +259,7 @@ impl<S: LogStore> RegionMap<S> {
}
// No slot in map, we can insert the slot now.
- regions.insert(name.to_string(), slot);
+ let _ = regions.insert(name.to_string(), slot);
None
}
@@ -281,7 +281,7 @@ impl<S: LogStore> RegionMap<S> {
/// Remove region by name.
fn remove(&self, name: &str) {
let mut regions = self.0.write().unwrap();
- regions.remove(name);
+ let _ = regions.remove(name);
}
/// Collects regions.
@@ -557,7 +557,7 @@ mod tests {
let store_dir = tmp_dir.path().to_string_lossy();
let mut builder = Fs::default();
- builder.root(&store_dir);
+ let _ = builder.root(&store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
@@ -652,13 +652,13 @@ mod tests {
let v1 = Arc::new(Float32Vector::from_slice([0.1, 0.2, 0.3])) as VectorRef;
let tsv = Arc::new(TimestampMillisecondVector::from_slice([0, 0, 0])) as VectorRef;
- let mut put_data = HashMap::with_capacity(4);
- put_data.insert("k1".to_string(), k1);
- put_data.insert("v1".to_string(), v1);
- put_data.insert("ts".to_string(), tsv);
-
+ let put_data = HashMap::from([
+ ("k1".to_string(), k1),
+ ("v1".to_string(), v1),
+ ("ts".to_string(), tsv),
+ ]);
wb.put(put_data).unwrap();
- region.write(&WriteContext::default(), wb).await.unwrap();
+ assert!(region.write(&WriteContext::default(), wb).await.is_ok());
// Flush memtable to sst.
region.flush(&FlushContext::default()).await.unwrap();
diff --git a/src/storage/src/file_purger.rs b/src/storage/src/file_purger.rs
index 09373ac69ee7..977a0d7179e3 100644
--- a/src/storage/src/file_purger.rs
+++ b/src/storage/src/file_purger.rs
@@ -173,7 +173,7 @@ mod tests {
async fn test_file_purger_handler() {
let dir = create_temp_dir("file-purge");
let mut builder = Fs::default();
- builder.root(dir.path().to_str().unwrap());
+ let _ = builder.root(dir.path().to_str().unwrap());
let object_store = ObjectStore::new(builder).unwrap().finish();
let sst_file_id = FileId::random();
@@ -210,7 +210,7 @@ mod tests {
common_telemetry::init_default_ut_logging();
let dir = create_temp_dir("file-purge");
let mut builder = Fs::default();
- builder.root(dir.path().to_str().unwrap());
+ let _ = builder.root(dir.path().to_str().unwrap());
let object_store = ObjectStore::new(builder).unwrap().finish();
let sst_file_id = FileId::random();
let scheduler = Arc::new(LocalScheduler::new(
diff --git a/src/storage/src/flush.rs b/src/storage/src/flush.rs
index 8989f21d0ff0..64ddc956cf4f 100644
--- a/src/storage/src/flush.rs
+++ b/src/storage/src/flush.rs
@@ -194,20 +194,20 @@ impl FlushStrategy for SizeBasedStrategy {
fn reserve_mem(&self, mem: usize) {
if self.is_global_limit_enabled() {
- self.memory_used.fetch_add(mem, Ordering::Relaxed);
- self.memory_active.fetch_add(mem, Ordering::Relaxed);
+ let _ = self.memory_used.fetch_add(mem, Ordering::Relaxed);
+ let _ = self.memory_active.fetch_add(mem, Ordering::Relaxed);
}
}
fn schedule_free_mem(&self, mem: usize) {
if self.is_global_limit_enabled() {
- self.memory_active.fetch_sub(mem, Ordering::Relaxed);
+ let _ = self.memory_active.fetch_sub(mem, Ordering::Relaxed);
}
}
fn free_mem(&self, mem: usize) {
if self.is_global_limit_enabled() {
- self.memory_used.fetch_sub(mem, Ordering::Relaxed);
+ let _ = self.memory_used.fetch_sub(mem, Ordering::Relaxed);
}
}
}
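The flush accounting above binds `fetch_add`/`fetch_sub` results to `_` because atomic read-modify-write operations return the previous value, which is not needed when the counter is only being bumped. A reduced sketch with an illustrative counter:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

static MEMORY_USED: AtomicUsize = AtomicUsize::new(0);

fn reserve_mem(bytes: usize) {
    // `fetch_add` returns the old value; `_` states it is deliberately unused.
    let _ = MEMORY_USED.fetch_add(bytes, Ordering::Relaxed);
}

fn free_mem(bytes: usize) {
    let _ = MEMORY_USED.fetch_sub(bytes, Ordering::Relaxed);
}

fn main() {
    reserve_mem(1024);
    free_mem(512);
    assert_eq!(MEMORY_USED.load(Ordering::Relaxed), 512);
}
```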
diff --git a/src/storage/src/flush/scheduler.rs b/src/storage/src/flush/scheduler.rs
index 76d00c0bbe9a..fa75be05990c 100644
--- a/src/storage/src/flush/scheduler.rs
+++ b/src/storage/src/flush/scheduler.rs
@@ -243,7 +243,7 @@ impl<S: LogStore> FlushScheduler<S> {
/// Schedules a engine flush request.
pub fn schedule_engine_flush(&self) -> Result<()> {
- self.scheduler.schedule(FlushRequest::Engine)?;
+ let _ = self.scheduler.schedule(FlushRequest::Engine)?;
Ok(())
}
@@ -256,7 +256,7 @@ impl<S: LogStore> FlushScheduler<S> {
self.scheduler.stop(true).await?;
#[cfg(test)]
- futures::future::join_all(self.pending_tasks.write().await.drain(..)).await;
+ let _ = futures::future::join_all(self.pending_tasks.write().await.drain(..)).await;
Ok(())
}
@@ -330,7 +330,7 @@ async fn execute_flush_region<S: LogStore>(
let shared_data = req.shared.clone();
// If flush is success, schedule a compaction request for this region.
- region::schedule_compaction(
+ let _ = region::schedule_compaction(
shared_data,
compaction_scheduler,
compaction_request,
@@ -354,7 +354,7 @@ impl<S: LogStore> TaskFunction<Error> for AutoFlushFunction<S> {
async fn call(&mut self) -> Result<()> {
// Get all regions.
let regions = self.regions.list_regions();
- self.picker.pick_by_interval(®ions).await;
+ let _ = self.picker.pick_by_interval(®ions).await;
Ok(())
}
diff --git a/src/storage/src/manifest/action.rs b/src/storage/src/manifest/action.rs
index c091b7d475fe..e92156e84492 100644
--- a/src/storage/src/manifest/action.rs
+++ b/src/storage/src/manifest/action.rs
@@ -125,10 +125,10 @@ impl RegionManifestDataBuilder {
version.manifest_version = manifest_version;
version.flushed_sequence = edit.flushed_sequence;
for file in edit.files_to_add {
- version.files.insert(file.file_id, file);
+ let _ = version.files.insert(file.file_id, file);
}
for file in edit.files_to_remove {
- version.files.remove(&file.file_id);
+ let _ = version.files.remove(&file.file_id);
}
} else {
self.version = Some(RegionVersion {
@@ -338,16 +338,16 @@ mod tests {
#[test]
fn test_region_manifest_compatibility() {
let region_edit = r#"{"region_version":0,"flushed_sequence":null,"files_to_add":[{"region_id":4402341478400,"file_name":"4b220a70-2b03-4641-9687-b65d94641208.parquet","time_range":[{"value":1451609210000,"unit":"Millisecond"},{"value":1451609520000,"unit":"Millisecond"}],"level":1}],"files_to_remove":[{"region_id":4402341478400,"file_name":"34b6ebb9-b8a5-4a4b-b744-56f67defad02.parquet","time_range":[{"value":1451609210000,"unit":"Millisecond"},{"value":1451609520000,"unit":"Millisecond"}],"level":0}]}"#;
- serde_json::from_str::<RegionEdit>(region_edit).unwrap();
+ assert!(serde_json::from_str::<RegionEdit>(region_edit).is_ok());
let region_change = r#" {"committed_sequence":42,"metadata":{"id":0,"name":"region-0","columns":{"columns":[{"cf_id":0,"desc":{"id":2,"name":"k1","data_type":{"Int32":{}},"is_nullable":false,"is_time_index":false,"default_constraint":null,"comment":""}},{"cf_id":0,"desc":{"id":1,"name":"timestamp","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"comment":""}},{"cf_id":1,"desc":{"id":3,"name":"v1","data_type":{"Float32":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"comment":""}},{"cf_id":1,"desc":{"id":2147483649,"name":"__sequence","data_type":{"UInt64":{}},"is_nullable":false,"is_time_index":false,"default_constraint":null,"comment":""}},{"cf_id":1,"desc":{"id":2147483650,"name":"__op_type","data_type":{"UInt8":{}},"is_nullable":false,"is_time_index":false,"default_constraint":null,"comment":""}}],"row_key_end":2,"timestamp_key_index":1,"enable_version_column":false,"user_column_end":3},"column_families":{"column_families":[{"name":"default","cf_id":1,"column_index_start":2,"column_index_end":3}]},"version":0}}"#;
- serde_json::from_str::<RegionChange>(region_change).unwrap();
+ assert!(serde_json::from_str::<RegionChange>(region_change).is_ok());
let region_remove = r#"{"region_id":42}"#;
- serde_json::from_str::<RegionRemove>(region_remove).unwrap();
+ assert!(serde_json::from_str::<RegionRemove>(region_remove).is_ok());
let protocol_action = r#"{"min_reader_version":1,"min_writer_version":2}"#;
- serde_json::from_str::<ProtocolAction>(protocol_action).unwrap();
+ assert!(serde_json::from_str::<ProtocolAction>(protocol_action).is_ok());
}
fn mock_file_meta() -> FileMeta {
diff --git a/src/storage/src/manifest/region.rs b/src/storage/src/manifest/region.rs
index 4c80f47f1ab6..366ae898897a 100644
--- a/src/storage/src/manifest/region.rs
+++ b/src/storage/src/manifest/region.rs
@@ -228,7 +228,7 @@ mod tests {
async fn new_fs_manifest(compress: bool, gc_duration: Option<Duration>) -> RegionManifest {
let tmp_dir = create_temp_dir("test_region_manifest");
let mut builder = Fs::default();
- builder.root(&tmp_dir.path().to_string_lossy());
+ let _ = builder.root(&tmp_dir.path().to_string_lossy());
let object_store = ObjectStore::new(builder).unwrap().finish();
let manifest = RegionManifest::with_checkpointer(
@@ -248,14 +248,14 @@ mod tests {
) -> (RegionManifest, TempFolder) {
let s3_config = s3_test_config().unwrap();
let mut builder = S3::default();
- builder
+ let _ = builder
.root(&s3_config.root)
.access_key_id(&s3_config.access_key_id)
.secret_access_key(&s3_config.secret_access_key)
.bucket(&s3_config.bucket);
if s3_config.region.is_some() {
- builder.region(s3_config.region.as_ref().unwrap());
+ let _ = builder.region(s3_config.region.as_ref().unwrap());
}
let store = ObjectStore::new(builder).unwrap().finish();
let temp_folder = TempFolder::new(&store, "/");
@@ -287,7 +287,7 @@ mod tests {
.unwrap()
);
- manifest
+ assert!(manifest
.update(RegionMetaActionList::with_action(RegionMetaAction::Change(
RegionChange {
metadata: region_meta.as_ref().into(),
@@ -295,7 +295,7 @@ mod tests {
},
)))
.await
- .unwrap();
+ .is_ok());
let mut iter = manifest.scan(0, MAX_VERSION).await.unwrap();
@@ -322,7 +322,7 @@ mod tests {
}
// Save some actions
- manifest
+ assert!(manifest
.update(RegionMetaActionList::new(vec![
RegionMetaAction::Edit(build_region_edit(1, &[FileId::random()], &[])),
RegionMetaAction::Edit(build_region_edit(
@@ -332,7 +332,7 @@ mod tests {
)),
]))
.await
- .unwrap();
+ .is_ok());
let mut iter = manifest.scan(0, MAX_VERSION).await.unwrap();
let (v, action_list) = iter.next_action().await.unwrap().unwrap();
@@ -444,7 +444,7 @@ mod tests {
];
for action in actions {
- manifest.update(action).await.unwrap();
+ assert!(manifest.update(action).await.is_ok());
}
assert!(manifest.last_checkpoint().await.unwrap().is_none());
assert_scan(manifest, 0, 3).await;
@@ -503,7 +503,7 @@ mod tests {
))]),
];
for action in actions {
- manifest.update(action).await.unwrap();
+ assert!(manifest.update(action).await.is_ok());
}
assert_scan(manifest, 3, 2).await;
diff --git a/src/storage/src/manifest/storage.rs b/src/storage/src/manifest/storage.rs
index 0e88b9b556dc..86bc9c99d830 100644
--- a/src/storage/src/manifest/storage.rs
+++ b/src/storage/src/manifest/storage.rs
@@ -534,7 +534,7 @@ mod tests {
common_telemetry::init_default_ut_logging();
let tmp_dir = create_temp_dir("test_manifest_log_store");
let mut builder = Fs::default();
- builder.root(&tmp_dir.path().to_string_lossy());
+ let _ = builder.root(&tmp_dir.path().to_string_lossy());
let object_store = ObjectStore::new(builder).unwrap().finish();
ManifestObjectStore::new("/", object_store, CompressionType::Uncompressed)
}
@@ -610,7 +610,7 @@ mod tests {
assert_eq!(3, v);
//delete (,4) logs and keep checkpoint 3.
- log_store.delete_until(4, true).await.unwrap();
+ assert!(log_store.delete_until(4, true).await.is_ok());
assert!(log_store.load_checkpoint(3).await.unwrap().is_some());
assert!(log_store.load_last_checkpoint().await.unwrap().is_some());
let mut it = log_store.scan(0, 11).await.unwrap();
@@ -620,7 +620,7 @@ mod tests {
assert!(it.next_log().await.unwrap().is_none());
// delete all logs and checkpoints
- log_store.delete_until(11, false).await.unwrap();
+ assert!(log_store.delete_until(11, false).await.is_ok());
assert!(log_store.load_checkpoint(3).await.unwrap().is_none());
assert!(log_store.load_last_checkpoint().await.unwrap().is_none());
let mut it = log_store.scan(0, 11).await.unwrap();
diff --git a/src/storage/src/memtable.rs b/src/storage/src/memtable.rs
index 9a4aea5dd9ef..8057069c66af 100644
--- a/src/storage/src/memtable.rs
+++ b/src/storage/src/memtable.rs
@@ -229,7 +229,7 @@ impl AllocTracker {
/// Tracks `bytes` memory is allocated.
pub(crate) fn on_allocate(&self, bytes: usize) {
- self.bytes_allocated.fetch_add(bytes, Ordering::Relaxed);
+ let _ = self.bytes_allocated.fetch_add(bytes, Ordering::Relaxed);
increment_gauge!(WRITE_BUFFER_BYTES, bytes as f64);
if let Some(flush_strategy) = &self.flush_strategy {
flush_strategy.reserve_mem(bytes);
diff --git a/src/storage/src/memtable/btree.rs b/src/storage/src/memtable/btree.rs
index 0400a2e3a1d1..949d2c67c37d 100644
--- a/src/storage/src/memtable/btree.rs
+++ b/src/storage/src/memtable/btree.rs
@@ -137,7 +137,7 @@ impl Memtable for BTreeMemtable {
if ts > max_ts {
*max_ts = ts.clone();
}
- map.insert(inner_key, row_value);
+ let _ = map.insert(inner_key, row_value);
}
self.update_stats(kvs.estimated_memory_size(), min_ts, max_ts);
diff --git a/src/storage/src/memtable/inserter.rs b/src/storage/src/memtable/inserter.rs
index 5fa263e1b5f4..8dfdd0bad21a 100644
--- a/src/storage/src/memtable/inserter.rs
+++ b/src/storage/src/memtable/inserter.rs
@@ -163,11 +163,12 @@ mod tests {
}
fn put_batch(batch: &mut WriteBatch, data: &[(i64, Option<i64>)]) {
- let mut put_data = HashMap::with_capacity(2);
let ts = TimestampMillisecondVector::from_values(data.iter().map(|v| v.0));
- put_data.insert("ts".to_string(), Arc::new(ts) as VectorRef);
let value = Int64Vector::from(data.iter().map(|v| v.1).collect::<Vec<_>>());
- put_data.insert("value".to_string(), Arc::new(value) as VectorRef);
+ let put_data = HashMap::from([
+ ("ts".to_string(), Arc::new(ts) as VectorRef),
+ ("value".to_string(), Arc::new(value) as VectorRef),
+ ]);
batch.put(put_data).unwrap();
}
diff --git a/src/storage/src/metadata.rs b/src/storage/src/metadata.rs
index e6c91bcd3a19..50dfb7935826 100644
--- a/src/storage/src/metadata.rs
+++ b/src/storage/src/metadata.rs
@@ -425,11 +425,11 @@ impl ColumnMetadata {
fn to_metadata(&self) -> Metadata {
let mut metadata = Metadata::new();
if self.cf_id != consts::DEFAULT_CF_ID {
- metadata.insert(METADATA_CF_ID_KEY.to_string(), self.cf_id.to_string());
+ let _ = metadata.insert(METADATA_CF_ID_KEY.to_string(), self.cf_id.to_string());
}
- metadata.insert(METADATA_COLUMN_ID_KEY.to_string(), self.desc.id.to_string());
+ let _ = metadata.insert(METADATA_COLUMN_ID_KEY.to_string(), self.desc.id.to_string());
if !self.desc.comment.is_empty() {
- metadata.insert(COMMENT_KEY.to_string(), self.desc.comment.clone());
+ let _ = metadata.insert(COMMENT_KEY.to_string(), self.desc.comment.clone());
}
metadata
@@ -657,12 +657,12 @@ struct ColumnsMetadataBuilder {
impl ColumnsMetadataBuilder {
fn row_key(&mut self, key: RowKeyDescriptor) -> Result<&mut Self> {
for col in key.columns {
- self.push_row_key_column(col)?;
+ let _ = self.push_row_key_column(col)?;
}
// TODO(yingwen): Validate this is a timestamp column.
self.timestamp_key_index = Some(self.columns.len());
- self.push_row_key_column(key.timestamp)?;
+ let _ = self.push_row_key_column(key.timestamp)?;
self.row_key_end = self.columns.len();
Ok(self)
@@ -705,8 +705,8 @@ impl ColumnsMetadataBuilder {
let column_index = self.columns.len();
self.columns.push(meta);
- self.name_to_col_index.insert(column_name, column_index);
- self.column_ids.insert(column_id);
+ let _ = self.name_to_col_index.insert(column_name, column_index);
+ let _ = self.column_ids.insert(column_id);
Ok(self)
}
@@ -717,7 +717,7 @@ impl ColumnsMetadataBuilder {
let user_column_end = self.columns.len();
// Setup internal columns.
for internal_desc in internal_column_descs() {
- self.push_new_column(consts::DEFAULT_CF_ID, internal_desc)?;
+ let _ = self.push_new_column(consts::DEFAULT_CF_ID, internal_desc)?;
}
Ok(ColumnsMetadata {
@@ -748,8 +748,8 @@ impl ColumnFamiliesMetadataBuilder {
CfNameExistsSnafu { name: &cf.name }
);
- self.cf_names.insert(cf.name.clone());
- self.id_to_cfs.insert(cf.cf_id, cf);
+ let _ = self.cf_names.insert(cf.name.clone());
+ let _ = self.id_to_cfs.insert(cf.cf_id, cf);
Ok(self)
}
@@ -802,7 +802,7 @@ impl RegionMetadataBuilder {
}
fn row_key(mut self, key: RowKeyDescriptor) -> Result<Self> {
- self.columns_meta_builder.row_key(key)?;
+ let _ = self.columns_meta_builder.row_key(key)?;
Ok(self)
}
@@ -817,10 +817,10 @@ impl RegionMetadataBuilder {
column_index_end,
};
- self.cfs_meta_builder.add_column_family(cf_meta)?;
+ let _ = self.cfs_meta_builder.add_column_family(cf_meta)?;
for col in cf.columns {
- self.columns_meta_builder.push_field_column(cf.cf_id, col)?;
+ let _ = self.columns_meta_builder.push_field_column(cf.cf_id, col)?;
}
Ok(self)
diff --git a/src/storage/src/region.rs b/src/storage/src/region.rs
index a26ed012ce34..2626ef0bfb67 100644
--- a/src/storage/src/region.rs
+++ b/src/storage/src/region.rs
@@ -477,7 +477,7 @@ impl<S: LogStore> RegionImpl<S> {
}
}
(RegionMetaAction::Change(c), Some(v)) => {
- recovered_metadata
+ let _ = recovered_metadata
.insert(c.committed_sequence, (manifest_version, c.metadata));
version = Some(v);
}
diff --git a/src/storage/src/region/tests.rs b/src/storage/src/region/tests.rs
index 5665cd4037ee..ba88e02ba8fb 100644
--- a/src/storage/src/region/tests.rs
+++ b/src/storage/src/region/tests.rs
@@ -90,7 +90,7 @@ impl<S: LogStore> TesterBase<S> {
pub async fn checkpoint_manifest(&self) {
let manifest = &self.region.inner.manifest;
manifest.set_flushed_manifest_version(manifest.last_version() - 1);
- manifest.do_checkpoint().await.unwrap().unwrap();
+ assert!(manifest.do_checkpoint().await.unwrap().is_some());
}
pub async fn close(&self) {
@@ -256,33 +256,26 @@ fn new_write_batch_for_test(enable_version_column: bool) -> WriteBatch {
}
fn new_put_data(data: &[(TimestampMillisecond, Option<String>)]) -> HashMap<String, VectorRef> {
- let mut put_data = HashMap::with_capacity(2);
-
let timestamps =
TimestampMillisecondVector::from_vec(data.iter().map(|v| v.0.into()).collect());
let values = StringVector::from(data.iter().map(|kv| kv.1.clone()).collect::<Vec<_>>());
- put_data.insert(
- test_util::TIMESTAMP_NAME.to_string(),
- Arc::new(timestamps) as VectorRef,
- );
- put_data.insert("v0".to_string(), Arc::new(values) as VectorRef);
-
- put_data
+ HashMap::from([
+ (
+ test_util::TIMESTAMP_NAME.to_string(),
+ Arc::new(timestamps) as VectorRef,
+ ),
+ ("v0".to_string(), Arc::new(values) as VectorRef),
+ ])
}
fn new_delete_data(keys: &[TimestampMillisecond]) -> HashMap<String, VectorRef> {
- let mut delete_data = HashMap::new();
-
let timestamps =
TimestampMillisecondVector::from_vec(keys.iter().map(|v| v.0.into()).collect());
-
- delete_data.insert(
+ HashMap::from([(
test_util::TIMESTAMP_NAME.to_string(),
Arc::new(timestamps) as VectorRef,
- );
-
- delete_data
+ )])
}
fn append_chunk_to(chunk: &Chunk, dst: &mut Vec<(i64, Option<String>)>) {
@@ -357,7 +350,7 @@ async fn test_recover_region_manifets(compress: bool) {
let memtable_builder = Arc::new(DefaultMemtableBuilder::default()) as _;
let mut builder = Fs::default();
- builder.root(&tmp_dir.path().to_string_lossy());
+ let _ = builder.root(&tmp_dir.path().to_string_lossy());
let object_store = ObjectStore::new(builder).unwrap().finish();
let manifest = RegionManifest::with_checkpointer(
@@ -392,7 +385,7 @@ async fn test_recover_region_manifets(compress: bool) {
{
// save some actions into region_meta
- manifest
+ assert!(manifest
.update(RegionMetaActionList::with_action(RegionMetaAction::Change(
RegionChange {
metadata: region_meta.as_ref().into(),
@@ -400,17 +393,17 @@ async fn test_recover_region_manifets(compress: bool) {
},
)))
.await
- .unwrap();
+ .is_ok());
- manifest
+ assert!(manifest
.update(RegionMetaActionList::new(vec![
RegionMetaAction::Edit(build_region_edit(1, &[file_id_a], &[])),
RegionMetaAction::Edit(build_region_edit(2, &[file_id_b, file_id_c], &[])),
]))
.await
- .unwrap();
+ .is_ok());
- manifest
+ assert!(manifest
.update(RegionMetaActionList::with_action(RegionMetaAction::Change(
RegionChange {
metadata: region_meta.as_ref().into(),
@@ -418,7 +411,7 @@ async fn test_recover_region_manifets(compress: bool) {
},
)))
.await
- .unwrap();
+ .is_ok());
}
// try to recover
@@ -516,7 +509,7 @@ fn create_region_meta(region_name: &str) -> RegionMetadata {
async fn create_store_config(region_name: &str, root: &str) -> StoreConfig<NoopLogStore> {
let mut builder = Fs::default();
- builder.root(root);
+ let _ = builder.root(root);
let object_store = ObjectStore::new(builder).unwrap().finish();
let parent_dir = "";
let sst_dir = engine::region_sst_dir(parent_dir, region_name);
@@ -618,10 +611,11 @@ impl WindowedReaderTester {
.collect::<HashMap<String, VectorRef>>();
write_batch.put(columns).unwrap();
- self.region
+ assert!(self
+ .region
.write(&WriteContext {}, write_batch)
.await
- .unwrap();
+ .is_ok());
// flush the region to ensure data resides across SST files.
self.region
diff --git a/src/storage/src/region/tests/alter.rs b/src/storage/src/region/tests/alter.rs
index 2a4b58528c23..75d38a6c2730 100644
--- a/src/storage/src/region/tests/alter.rs
+++ b/src/storage/src/region/tests/alter.rs
@@ -23,7 +23,7 @@ use log_store::raft_engine::log_store::RaftEngineLogStore;
use store_api::storage::{
AddColumn, AlterOperation, AlterRequest, Chunk, ChunkReader, ColumnDescriptor,
ColumnDescriptorBuilder, ColumnId, FlushContext, FlushReason, Region, RegionMeta, ScanRequest,
- SchemaRef, Snapshot, WriteRequest, WriteResponse,
+ SchemaRef, Snapshot, WriteRequest,
};
use crate::config::EngineConfig;
@@ -75,7 +75,6 @@ impl DataRow {
}
fn new_put_data(data: &[DataRow]) -> HashMap<String, VectorRef> {
- let mut put_data = HashMap::with_capacity(4);
let keys = Int64Vector::from(data.iter().map(|v| v.key).collect::<Vec<_>>());
let timestamps = TimestampMillisecondVector::from(
data.iter()
@@ -85,16 +84,15 @@ fn new_put_data(data: &[DataRow]) -> HashMap<String, VectorRef> {
let values1 = StringVector::from(data.iter().map(|v| v.v0.clone()).collect::<Vec<_>>());
let values2 = Int64Vector::from(data.iter().map(|kv| kv.v1).collect::<Vec<_>>());
- put_data.insert("k0".to_string(), Arc::new(keys) as VectorRef);
- put_data.insert(
- test_util::TIMESTAMP_NAME.to_string(),
- Arc::new(timestamps) as VectorRef,
- );
-
- put_data.insert("v0".to_string(), Arc::new(values1) as VectorRef);
- put_data.insert("v1".to_string(), Arc::new(values2) as VectorRef);
-
- put_data
+ HashMap::from([
+ ("k0".to_string(), Arc::new(keys) as VectorRef),
+ (
+ test_util::TIMESTAMP_NAME.to_string(),
+ Arc::new(timestamps) as VectorRef,
+ ),
+ ("v0".to_string(), Arc::new(values1) as VectorRef),
+ ("v1".to_string(), Arc::new(values2) as VectorRef),
+ ])
}
impl AlterTester {
@@ -151,16 +149,17 @@ impl AlterTester {
}
// Put with schema k0, ts, v0, v1
- async fn put(&self, data: &[DataRow]) -> WriteResponse {
+ async fn put(&self, data: &[DataRow]) {
let mut batch = self.base().region.write_request();
let put_data = new_put_data(data);
batch.put(put_data).unwrap();
- self.base()
+ assert!(self
+ .base()
.region
.write(&self.base().write_ctx, batch)
.await
- .unwrap()
+ .is_ok());
}
/// Put data with initial schema.
@@ -170,7 +169,7 @@ impl AlterTester {
.iter()
.map(|(ts, v0)| (*ts, v0.map(|v| v.to_string())))
.collect::<Vec<_>>();
- self.base().put(&data).await;
+ let _ = self.base().put(&data).await;
}
/// Put data to inner writer with initial schema.
@@ -180,7 +179,7 @@ impl AlterTester {
.map(|(ts, v0)| (*ts, v0.map(|v| v.to_string())))
.collect::<Vec<_>>();
// put of FileTesterBase always use initial schema version.
- self.base().put_inner(&data).await;
+ let _ = self.base().put_inner(&data).await;
}
async fn alter(&self, mut req: AlterRequest) {
@@ -475,8 +474,6 @@ async fn test_replay_metadata_after_open() {
let manifest_version = tester.base().region.current_manifest_version();
let version = tester.version();
- let mut recovered_metadata = BTreeMap::new();
-
let desc = RegionDescBuilder::new(REGION_NAME)
.push_key_column(("k1", LogicalTypeId::Int32, false))
.push_field_column(("v0", LogicalTypeId::Float32, true))
@@ -484,7 +481,10 @@ async fn test_replay_metadata_after_open() {
let metadata: &RegionMetadata = &desc.try_into().unwrap();
let mut raw_metadata: RawRegionMetadata = metadata.into();
raw_metadata.version = version + 1;
- recovered_metadata.insert(committed_sequence, (manifest_version + 1, raw_metadata));
+
+ let recovered_metadata =
+ BTreeMap::from([(committed_sequence, (manifest_version + 1, raw_metadata))]);
+
tester.base().replay_inner(recovered_metadata).await;
let schema = tester.schema();
check_schema_names(&schema, &["k1", "timestamp", "v0"]);
diff --git a/src/storage/src/region/tests/basic.rs b/src/storage/src/region/tests/basic.rs
index c917635a7dd6..13565fba682c 100644
--- a/src/storage/src/region/tests/basic.rs
+++ b/src/storage/src/region/tests/basic.rs
@@ -17,7 +17,7 @@
use common_telemetry::info;
use common_test_util::temp_dir::create_temp_dir;
use log_store::raft_engine::log_store::RaftEngineLogStore;
-use store_api::storage::{OpenOptions, SequenceNumber, WriteResponse};
+use store_api::storage::{OpenOptions, SequenceNumber};
use crate::config::EngineConfig;
use crate::error::Result;
@@ -65,7 +65,7 @@ impl Tester {
}
async fn reopen(&mut self) {
- self.try_reopen().await.unwrap();
+ let _ = self.try_reopen().await.unwrap();
}
async fn try_reopen(&mut self) -> Result<bool> {
@@ -105,8 +105,8 @@ impl Tester {
self.base.as_mut().unwrap().read_ctx.batch_size = batch_size;
}
- async fn put(&self, data: &[(i64, Option<String>)]) -> WriteResponse {
- self.base().put(data).await
+ async fn put(&self, data: &[(i64, Option<String>)]) {
+ let _ = self.base().put(data).await;
}
async fn full_scan(&self) -> Vec<(i64, Option<String>)> {
@@ -117,8 +117,8 @@ impl Tester {
self.base().committed_sequence()
}
- async fn delete(&self, keys: &[i64]) -> WriteResponse {
- self.base().delete(keys).await
+ async fn delete(&self, keys: &[i64]) {
+ let _ = self.base().delete(keys).await;
}
}
@@ -247,7 +247,7 @@ async fn test_put_delete_scan() {
assert_eq!(expect, output);
// Deletion is also persistent.
- tester.try_reopen().await.unwrap();
+ let _ = tester.try_reopen().await.unwrap();
let output = tester.full_scan().await;
assert_eq!(expect, output);
}
@@ -282,7 +282,7 @@ async fn test_put_delete_absent_key() {
assert_eq!(expect, output);
// Deletion is also persistent.
- tester.try_reopen().await.unwrap();
+ let _ = tester.try_reopen().await.unwrap();
let output = tester.full_scan().await;
assert_eq!(expect, output);
}
diff --git a/src/storage/src/region/tests/close.rs b/src/storage/src/region/tests/close.rs
index 3046a60dd9c6..44d54ac5bd28 100644
--- a/src/storage/src/region/tests/close.rs
+++ b/src/storage/src/region/tests/close.rs
@@ -66,12 +66,12 @@ impl CloseTester {
self.base.as_ref().unwrap()
}
- async fn put(&self, data: &[(i64, Option<i64>)]) -> WriteResponse {
+ async fn put(&self, data: &[(i64, Option<i64>)]) {
let data = data
.iter()
.map(|(ts, v0)| (*ts, v0.map(|v| v.to_string())))
.collect::<Vec<_>>();
- self.base().put(&data).await
+ let _ = self.base().put(&data).await;
}
async fn try_put(&self, data: &[(i64, Option<i64>)]) -> Result<WriteResponse, Error> {
diff --git a/src/storage/src/region/tests/compact.rs b/src/storage/src/region/tests/compact.rs
index 298729b6a064..8ff693445c8c 100644
--- a/src/storage/src/region/tests/compact.rs
+++ b/src/storage/src/region/tests/compact.rs
@@ -23,7 +23,7 @@ use common_test_util::temp_dir::create_temp_dir;
use log_store::raft_engine::log_store::RaftEngineLogStore;
use object_store::services::{Fs, S3};
use object_store::ObjectStore;
-use store_api::storage::{FlushContext, FlushReason, OpenOptions, Region, WriteResponse};
+use store_api::storage::{FlushContext, FlushReason, OpenOptions, Region};
use tokio::sync::{Notify, RwLock};
use crate::compaction::{CompactionHandler, SimplePicker};
@@ -47,7 +47,7 @@ fn new_object_store(store_dir: &str, s3_bucket: Option<String>) -> ObjectStore {
let root = uuid::Uuid::new_v4().to_string();
let mut builder = S3::default();
- builder
+ let _ = builder
.root(&root)
.access_key_id(&env::var("GT_S3_ACCESS_KEY_ID").unwrap())
.secret_access_key(&env::var("GT_S3_ACCESS_KEY").unwrap())
@@ -61,7 +61,7 @@ fn new_object_store(store_dir: &str, s3_bucket: Option<String>) -> ObjectStore {
logging::info!("Use local fs object store");
let mut builder = Fs::default();
- builder.root(store_dir);
+ let _ = builder.root(store_dir);
ObjectStore::new(builder).unwrap().finish()
}
@@ -144,7 +144,7 @@ impl Handler for MockFilePurgeHandler {
.await
.unwrap();
- self.num_deleted.fetch_add(1, Ordering::Relaxed);
+ let _ = self.num_deleted.fetch_add(1, Ordering::Relaxed);
Ok(())
}
@@ -205,12 +205,12 @@ impl CompactionTester {
self.base.as_mut().unwrap()
}
- async fn put(&self, data: &[(i64, Option<i64>)]) -> WriteResponse {
+ async fn put(&self, data: &[(i64, Option<i64>)]) {
let data = data
.iter()
.map(|(ts, v0)| (*ts, v0.map(|v| v.to_string())))
.collect::<Vec<_>>();
- self.base().put(&data).await
+ let _ = self.base().put(&data).await;
}
async fn flush(&self, wait: Option<bool>) {
@@ -243,7 +243,7 @@ impl CompactionTester {
async fn reopen(&mut self) -> Result<bool> {
// Close the old region.
if let Some(base) = self.base.take() {
- futures::future::join_all(self.pending_tasks.write().await.drain(..)).await;
+ let _ = futures::future::join_all(self.pending_tasks.write().await.drain(..)).await;
base.close().await;
}
diff --git a/src/storage/src/region/tests/flush.rs b/src/storage/src/region/tests/flush.rs
index fc4dc6fc0a65..29a060abd722 100644
--- a/src/storage/src/region/tests/flush.rs
+++ b/src/storage/src/region/tests/flush.rs
@@ -23,9 +23,7 @@ use common_recordbatch::OrderOption;
use common_test_util::temp_dir::create_temp_dir;
use datafusion_common::Column;
use log_store::raft_engine::log_store::RaftEngineLogStore;
-use store_api::storage::{
- FlushContext, FlushReason, OpenOptions, Region, ScanRequest, WriteResponse,
-};
+use store_api::storage::{FlushContext, FlushReason, OpenOptions, Region, ScanRequest};
use crate::config::EngineConfig;
use crate::engine::{self, RegionMap};
@@ -114,12 +112,12 @@ impl FlushTester {
self.base.as_ref().unwrap()
}
- async fn put(&self, data: &[(i64, Option<i64>)]) -> WriteResponse {
+ async fn put(&self, data: &[(i64, Option<i64>)]) {
let data = data
.iter()
.map(|(ts, v0)| (*ts, v0.map(|v| v.to_string())))
.collect::<Vec<_>>();
- self.base().put(&data).await
+ let _ = self.base().put(&data).await;
}
async fn full_scan(&self) -> Vec<(i64, Option<String>)> {
@@ -347,7 +345,7 @@ async fn test_schedule_engine_flush() {
assert_eq!(0, tester.base().region.last_flush_millis());
// Insert the region to the region map.
- tester.regions.get_or_occupy_slot(
+ let _ = tester.regions.get_or_occupy_slot(
REGION_NAME,
engine::RegionSlot::Ready(tester.base().region.clone()),
);
diff --git a/src/storage/src/region/tests/projection.rs b/src/storage/src/region/tests/projection.rs
index 79d957ffb327..d32e36e8898a 100644
--- a/src/storage/src/region/tests/projection.rs
+++ b/src/storage/src/region/tests/projection.rs
@@ -68,8 +68,6 @@ fn new_put_data(
ts_start: i64,
initial_value: i64,
) -> HashMap<String, VectorRef> {
- let mut put_data = HashMap::with_capacity(4);
-
let k0 = Arc::new(Int64Vector::from_values(
(0..len).map(|v| key_start + v as i64),
)) as VectorRef;
@@ -83,12 +81,12 @@ fn new_put_data(
(0..len).map(|v| initial_value + v as i64),
)) as VectorRef;
- put_data.insert("k0".to_string(), k0);
- put_data.insert(test_util::TIMESTAMP_NAME.to_string(), ts);
- put_data.insert("v0".to_string(), v0);
- put_data.insert("v1".to_string(), v1);
-
- put_data
+ HashMap::from([
+ ("k0".to_string(), k0),
+ (test_util::TIMESTAMP_NAME.to_string(), ts),
+ ("v0".to_string(), v0),
+ ("v1".to_string(), v1),
+ ])
}
fn append_chunk_to(chunk: &Chunk, dst: &mut Vec<Vec<i64>>) {
@@ -144,7 +142,7 @@ impl<S: LogStore> ProjectionTester<S> {
let put_data = new_put_data(len, key_start, ts_start, initial_value);
batch.put(put_data).unwrap();
- self.region.write(&self.write_ctx, batch).await.unwrap();
+ assert!(self.region.write(&self.write_ctx, batch).await.is_ok());
}
async fn scan(&self, projection: Option<Vec<usize>>) -> Vec<Vec<i64>> {
diff --git a/src/storage/src/region/writer.rs b/src/storage/src/region/writer.rs
index 9a85762c4969..31f08d6acc1b 100644
--- a/src/storage/src/region/writer.rs
+++ b/src/storage/src/region/writer.rs
@@ -251,7 +251,7 @@ impl RegionWriter {
version_control.set_committed_sequence(next_sequence);
let header = WalHeader::with_last_manifest_version(manifest_version);
- wal.write_to_wal(next_sequence, header, None).await?;
+ let _ = wal.write_to_wal(next_sequence, header, None).await?;
Ok(())
}
@@ -317,7 +317,7 @@ impl RegionWriter {
action_list
);
- drop_ctx.manifest.update(action_list).await?;
+ let _ = drop_ctx.manifest.update(action_list).await?;
// Mark all data obsolete and delete the namespace in the WAL
drop_ctx.wal.obsolete(committed_sequence).await?;
@@ -505,7 +505,7 @@ impl WriterInner {
let version = version_control.current();
let wal_header = WalHeader::with_last_manifest_version(version.manifest_version());
- writer_ctx
+ let _ = writer_ctx
.wal
.write_to_wal(next_sequence, wal_header, Some(request.payload()))
.await?;
@@ -837,7 +837,7 @@ impl WriterInner {
.context(error::CompactTaskCancelSnafu { region_id })??;
}
} else {
- schedule_compaction(
+ let _ = schedule_compaction(
shared_data,
compaction_scheduler,
compaction_request,
diff --git a/src/storage/src/scheduler.rs b/src/storage/src/scheduler.rs
index 4f6d1689cdac..202fed5970c8 100644
--- a/src/storage/src/scheduler.rs
+++ b/src/storage/src/scheduler.rs
@@ -289,7 +289,7 @@ where
#[inline]
async fn put_back_req(&self, key: R::Key, req: R) {
let mut queue = self.req_queue.write().unwrap();
- queue.push_front(key, req);
+ let _ = queue.push_front(key, req);
}
// Handles request, submit task to bg runtime.
@@ -375,11 +375,11 @@ mod tests {
});
let handler_cloned = handler.clone();
- common_runtime::spawn_bg(async move { handler_cloned.run().await });
+ let _handle = common_runtime::spawn_bg(async move { handler_cloned.run().await });
- queue.write().unwrap().push_back(1, MockRequest::default());
+ let _ = queue.write().unwrap().push_back(1, MockRequest::default());
handler.task_notifier.notify_one();
- queue.write().unwrap().push_back(2, MockRequest::default());
+ let _ = queue.write().unwrap().push_back(2, MockRequest::default());
handler.task_notifier.notify_one();
tokio::time::timeout(Duration::from_secs(1), latch.wait())
@@ -443,9 +443,8 @@ mod tests {
handler,
);
- scheduler.schedule(MockRequest { region_id: 1 }).unwrap();
-
- scheduler.schedule(MockRequest { region_id: 2 }).unwrap();
+ assert!(scheduler.schedule(MockRequest { region_id: 1 }).is_ok());
+ assert!(scheduler.schedule(MockRequest { region_id: 2 }).is_ok());
tokio::time::timeout(Duration::from_secs(1), latch.wait())
.await
@@ -472,11 +471,11 @@ mod tests {
let scheduler = LocalScheduler::new(config, handler);
for i in 0..task_size {
- scheduler
+ assert!(scheduler
.schedule(MockRequest {
region_id: i as RegionId,
})
- .unwrap();
+ .is_ok());
}
tokio::time::timeout(Duration::from_secs(3), latch.wait())
@@ -503,20 +502,20 @@ mod tests {
let scheduler = LocalScheduler::new(config, handler);
for i in 0..task_size / 2 {
- scheduler
+ assert!(scheduler
.schedule(MockRequest {
region_id: i as RegionId,
})
- .unwrap();
+ .is_ok());
}
tokio::time::sleep(Duration::from_millis(100)).await;
for i in task_size / 2..task_size {
- scheduler
+ assert!(scheduler
.schedule(MockRequest {
region_id: i as RegionId,
})
- .unwrap();
+ .is_ok());
}
tokio::time::timeout(Duration::from_secs(6), latch.wait())
@@ -552,7 +551,7 @@ mod tests {
let finished_clone = finished.clone();
let handler = MockHandler {
cb: move || {
- finished_clone.fetch_add(1, Ordering::Relaxed);
+ let _ = finished_clone.fetch_add(1, Ordering::Relaxed);
},
};
@@ -572,7 +571,7 @@ mod tests {
region_id: i as RegionId,
}) {
if res {
- task_scheduled_cloned.fetch_add(1, Ordering::Relaxed);
+ let _ = task_scheduled_cloned.fetch_add(1, Ordering::Relaxed);
}
}
diff --git a/src/storage/src/scheduler/dedup_deque.rs b/src/storage/src/scheduler/dedup_deque.rs
index 91cef075aeba..edb2184d8e2b 100644
--- a/src/storage/src/scheduler/dedup_deque.rs
+++ b/src/storage/src/scheduler/dedup_deque.rs
@@ -39,7 +39,7 @@ impl<K: Eq + Hash + Clone, V> DedupDeque<K, V> {
pub fn push_back(&mut self, key: K, value: V) -> bool {
debug_assert_eq!(self.deque.len(), self.existing.len());
if let Entry::Vacant(entry) = self.existing.entry(key.clone()) {
- entry.insert(value);
+ let _ = entry.insert(value);
self.deque.push_back(key);
return true;
}
@@ -51,7 +51,7 @@ impl<K: Eq + Hash + Clone, V> DedupDeque<K, V> {
/// returns false.
pub fn push_front(&mut self, key: K, value: V) -> bool {
if let Entry::Vacant(entry) = self.existing.entry(key.clone()) {
- entry.insert(value);
+ let _ = entry.insert(value);
self.deque.push_front(key);
return true;
}
diff --git a/src/storage/src/scheduler/rate_limit.rs b/src/storage/src/scheduler/rate_limit.rs
index 57efcbb1405c..63776b43f7a0 100644
--- a/src/storage/src/scheduler/rate_limit.rs
+++ b/src/storage/src/scheduler/rate_limit.rs
@@ -65,7 +65,7 @@ impl<R> RateLimiter for MaxInflightTaskLimiter<R> {
fn acquire_token(&self, _: &Self::Request) -> Result<BoxedRateLimitToken> {
if self.inflight_tasks.fetch_add(1, Ordering::Relaxed) >= self.max_inflight_tasks {
- self.inflight_tasks.fetch_sub(1, Ordering::Relaxed);
+ let _ = self.inflight_tasks.fetch_sub(1, Ordering::Relaxed);
return RateLimitedSnafu {
msg: format!(
"Max inflight task num exceeds, current: {}, max: {}",
@@ -103,7 +103,7 @@ impl RateLimitToken for MaxInflightLimiterToken {
.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
{
- self.counter.fetch_sub(1, Ordering::Relaxed);
+ let _ = self.counter.fetch_sub(1, Ordering::Relaxed);
}
}
}
diff --git a/src/storage/src/schema/compat.rs b/src/storage/src/schema/compat.rs
index 5b2459b5187d..2deefaddb27d 100644
--- a/src/storage/src/schema/compat.rs
+++ b/src/storage/src/schema/compat.rs
@@ -516,7 +516,7 @@ mod tests {
let batch = tests::new_batch_with_num_values(2);
let mut columns = batch.columns().to_vec();
// Remove v0.
- columns.remove(2);
+ let _ = columns.remove(2);
let batch = Batch::new(columns);
let new_batch = call_batch_from_parts(&adapter, &batch, 1);
diff --git a/src/storage/src/sst.rs b/src/storage/src/sst.rs
index b7342f3fcfd4..49aefe717cd3 100644
--- a/src/storage/src/sst.rs
+++ b/src/storage/src/sst.rs
@@ -130,7 +130,7 @@ impl LevelMetas {
// we only update region's compaction time window iff region's window is not set and VersionEdit's
// compaction time window is present.
if let Some(window) = compaction_time_window {
- merged.compaction_time_window.get_or_insert(window);
+ let _ = merged.compaction_time_window.get_or_insert(window);
}
merged
}
@@ -178,7 +178,7 @@ impl LevelMeta {
}
fn add_file(&mut self, file: FileHandle) {
- self.files.insert(file.file_id(), file);
+ let _ = self.files.insert(file.file_id(), file);
}
fn remove_file(&mut self, file_to_remove: FileId) -> Option<FileHandle> {
diff --git a/src/storage/src/sst/parquet.rs b/src/storage/src/sst/parquet.rs
index 7f9d697ebf6b..e2dd03544148 100644
--- a/src/storage/src/sst/parquet.rs
+++ b/src/storage/src/sst/parquet.rs
@@ -133,7 +133,7 @@ impl<'a> ParquetWriter<'a> {
if rows_written == 0 {
debug!("No data written, try abort writer: {}", self.file_path);
- buffered_writer.close().await?;
+ let _ = buffered_writer.close().await?;
return Ok(None);
}
@@ -564,7 +564,7 @@ mod tests {
fn create_object_store(root: &str) -> ObjectStore {
let mut builder = Fs::default();
- builder.root(root);
+ let _ = builder.root(root);
ObjectStore::new(builder).unwrap().finish()
}
@@ -597,10 +597,10 @@ mod tests {
let iter = memtable.iter(IterContext::default()).unwrap();
let writer = ParquetWriter::new(sst_file_name, Source::Iter(iter), object_store.clone());
- writer
+ assert!(writer
.write_sst(&sst::WriteOptions::default())
.await
- .unwrap();
+ .is_ok());
// verify parquet file
let reader = BufReader::new(object_store.reader(sst_file_name).await.unwrap().compat());
@@ -1000,7 +1000,7 @@ mod tests {
let dir = create_temp_dir("write-empty-file");
let path = dir.path().to_str().unwrap();
let mut builder = Fs::default();
- builder.root(path);
+ let _ = builder.root(path);
let object_store = ObjectStore::new(builder).unwrap().finish();
let sst_file_name = "test-empty.parquet";
let iter = memtable.iter(IterContext::default()).unwrap();
diff --git a/src/storage/src/sst/stream_writer.rs b/src/storage/src/sst/stream_writer.rs
index 6ae2aff150d0..b1c14b89d0c9 100644
--- a/src/storage/src/sst/stream_writer.rs
+++ b/src/storage/src/sst/stream_writer.rs
@@ -100,7 +100,8 @@ impl BufferedWriter {
.write(&arrow_batch)
.await
.context(error::WriteBufferSnafu)?;
- self.inner
+ let _ = self
+ .inner
.try_flush(false)
.await
.context(error::WriteBufferSnafu)?;
diff --git a/src/storage/src/test_util/config_util.rs b/src/storage/src/test_util/config_util.rs
index 4fc1597b0121..c0ec72fc8772 100644
--- a/src/storage/src/test_util/config_util.rs
+++ b/src/storage/src/test_util/config_util.rs
@@ -43,7 +43,7 @@ pub async fn new_store_config(
engine_config: EngineConfig,
) -> StoreConfig<RaftEngineLogStore> {
let mut builder = Fs::default();
- builder.root(store_dir);
+ let _ = builder.root(store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
new_store_config_with_object_store(region_name, store_dir, object_store, engine_config)
@@ -61,7 +61,7 @@ pub async fn new_store_config_and_region_map(
Arc<RegionMap<RaftEngineLogStore>>,
) {
let mut builder = Fs::default();
- builder.root(store_dir);
+ let _ = builder.root(store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
new_store_config_with_object_store(region_name, store_dir, object_store, engine_config).await
diff --git a/src/storage/src/wal.rs b/src/storage/src/wal.rs
index 4826fdadde2f..473b5e4600b6 100644
--- a/src/storage/src/wal.rs
+++ b/src/storage/src/wal.rs
@@ -287,7 +287,7 @@ mod tests {
let wal = Wal::new(0, Arc::new(log_store));
let header = WalHeader::with_last_manifest_version(111);
let seq_num = 3;
- wal.write_to_wal(seq_num, header, None).await?;
+ let _ = wal.write_to_wal(seq_num, header, None).await?;
let mut stream = wal.read_from_wal(seq_num).await?;
let mut data = vec![];
diff --git a/src/storage/src/window_infer.rs b/src/storage/src/window_infer.rs
index efc256176bb4..b336002b8aa0 100644
--- a/src/storage/src/window_infer.rs
+++ b/src/storage/src/window_infer.rs
@@ -126,11 +126,11 @@ fn align_time_spans_to_windows(durations: &[(i64, i64)], min_duration: i64) -> H
while next <= *end {
let next_aligned = next.align_by_bucket(min_duration).unwrap_or(i64::MIN);
if let Some(next_end_aligned) = next_aligned.checked_add(min_duration) {
- res.insert((next_aligned, next_end_aligned));
+ let _ = res.insert((next_aligned, next_end_aligned));
next = next_end_aligned;
} else {
// arithmetic overflow, clamp to i64::MAX and break the loop.
- res.insert((next_aligned, i64::MAX));
+ let _ = res.insert((next_aligned, i64::MAX));
break;
}
}
diff --git a/src/storage/src/write_batch.rs b/src/storage/src/write_batch.rs
index 49fa53916b11..dd4ad897e451 100644
--- a/src/storage/src/write_batch.rs
+++ b/src/storage/src/write_batch.rs
@@ -366,9 +366,10 @@ mod tests {
let vector1 = Arc::new(Int32Vector::from_slice([1, 2, 3, 4, 5])) as VectorRef;
- let mut put_data = HashMap::with_capacity(3);
- put_data.insert("k1".to_string(), vector1.clone());
- put_data.insert("v1".to_string(), vector1);
+ let put_data = HashMap::from([
+ ("k1".to_string(), vector1.clone()),
+ ("v1".to_string(), vector1),
+ ]);
let columns = NameToVector::new(put_data).unwrap();
assert_eq!(5, columns.num_rows());
@@ -378,8 +379,7 @@ mod tests {
#[test]
fn test_name_to_vector_empty_vector() {
let vector1 = Arc::new(Int32Vector::from_slice([])) as VectorRef;
- let mut put_data = HashMap::new();
- put_data.insert("k1".to_string(), vector1);
+ let put_data = HashMap::from([("k1".to_string(), vector1)]);
let columns = NameToVector::new(put_data).unwrap();
assert_eq!(0, columns.num_rows());
@@ -391,11 +391,11 @@ mod tests {
let intv = Arc::new(UInt64Vector::from_slice([1, 2, 3])) as VectorRef;
let boolv = Arc::new(BooleanVector::from(vec![true, false, true])) as VectorRef;
let tsv = Arc::new(TimestampMillisecondVector::from_slice([0, 0, 0])) as VectorRef;
-
- let mut put_data = HashMap::with_capacity(4);
- put_data.insert("k1".to_string(), intv.clone());
- put_data.insert("v1".to_string(), boolv);
- put_data.insert("ts".to_string(), tsv);
+ let put_data = HashMap::from([
+ ("k1".to_string(), intv),
+ ("v1".to_string(), boolv),
+ ("ts".to_string(), tsv),
+ ]);
let mut batch = new_test_batch();
batch.put(put_data).unwrap();
@@ -418,9 +418,7 @@ mod tests {
let boolv = Arc::new(BooleanVector::from_iterator(
iter::repeat(true).take(MAX_BATCH_SIZE + 1),
)) as VectorRef;
-
- let mut put_data = HashMap::new();
- put_data.insert("k1".to_string(), boolv);
+ let put_data = HashMap::from([("k1".to_string(), boolv)]);
let mut batch =
write_batch_util::new_write_batch(&[("k1", LogicalTypeId::Boolean, false)], None, 1);
@@ -433,11 +431,11 @@ mod tests {
let intv = Arc::new(UInt64Vector::from_slice([1, 2, 3])) as VectorRef;
let tsv = Arc::new(TimestampMillisecondVector::from_slice([0, 0])) as VectorRef;
let boolv = Arc::new(BooleanVector::from(vec![true, false, true])) as VectorRef;
-
- let mut put_data = HashMap::new();
- put_data.insert("k1".to_string(), intv.clone());
- put_data.insert("v1".to_string(), boolv.clone());
- put_data.insert("ts".to_string(), tsv);
+ let put_data = HashMap::from([
+ ("k1".to_string(), intv),
+ ("v1".to_string(), boolv),
+ ("ts".to_string(), tsv),
+ ]);
let mut batch = new_test_batch();
let err = batch.put(put_data).unwrap_err();
@@ -448,10 +446,7 @@ mod tests {
fn test_put_type_mismatch() {
let boolv = Arc::new(BooleanVector::from(vec![true, false, true])) as VectorRef;
let tsv = Arc::new(Int64Vector::from_slice([0, 0, 0])) as VectorRef;
-
- let mut put_data = HashMap::new();
- put_data.insert("k1".to_string(), boolv);
- put_data.insert("ts".to_string(), tsv);
+ let put_data = HashMap::from([("k1".to_string(), boolv), ("ts".to_string(), tsv)]);
let mut batch = new_test_batch();
let err = batch.put(put_data).unwrap_err();
@@ -462,10 +457,7 @@ mod tests {
fn test_put_type_has_null() {
let intv = Arc::new(UInt64Vector::from(vec![Some(1), None, Some(3)])) as VectorRef;
let tsv = Arc::new(Int64Vector::from_slice([0, 0, 0])) as VectorRef;
-
- let mut put_data = HashMap::new();
- put_data.insert("k1".to_string(), intv);
- put_data.insert("ts".to_string(), tsv);
+ let put_data = HashMap::from([("k1".to_string(), intv), ("ts".to_string(), tsv)]);
let mut batch = new_test_batch();
let err = batch.put(put_data).unwrap_err();
@@ -476,10 +468,7 @@ mod tests {
fn test_put_missing_column() {
let boolv = Arc::new(BooleanVector::from(vec![true, false, true])) as VectorRef;
let tsv = Arc::new(TimestampMillisecondVector::from_slice([0, 0, 0])) as VectorRef;
-
- let mut put_data = HashMap::new();
- put_data.insert("v1".to_string(), boolv);
- put_data.insert("ts".to_string(), tsv);
+ let put_data = HashMap::from([("v1".to_string(), boolv), ("ts".to_string(), tsv)]);
let mut batch = new_test_batch();
let err = batch.put(put_data).unwrap_err();
@@ -491,12 +480,12 @@ mod tests {
let intv = Arc::new(UInt64Vector::from_slice([1, 2, 3])) as VectorRef;
let tsv = Arc::new(TimestampMillisecondVector::from_slice([0, 0, 0])) as VectorRef;
let boolv = Arc::new(BooleanVector::from(vec![true, false, true])) as VectorRef;
-
- let mut put_data = HashMap::new();
- put_data.insert("k1".to_string(), intv.clone());
- put_data.insert("v1".to_string(), boolv.clone());
- put_data.insert("ts".to_string(), tsv);
- put_data.insert("v2".to_string(), boolv);
+ let put_data = HashMap::from([
+ ("k1".to_string(), intv.clone()),
+ ("v1".to_string(), boolv.clone()),
+ ("ts".to_string(), tsv),
+ ("v2".to_string(), boolv),
+ ]);
let mut batch = new_test_batch();
let err = batch.put(put_data).unwrap_err();
@@ -521,10 +510,7 @@ mod tests {
fn test_write_batch_delete() {
let intv = Arc::new(UInt64Vector::from_slice([1, 2, 3])) as VectorRef;
let tsv = Arc::new(TimestampMillisecondVector::from_slice([0, 0, 0])) as VectorRef;
-
- let mut keys = HashMap::with_capacity(3);
- keys.insert("k1".to_string(), intv.clone());
- keys.insert("ts".to_string(), tsv);
+ let keys = HashMap::from([("k1".to_string(), intv), ("ts".to_string(), tsv)]);
let mut batch = new_test_batch();
batch.delete(keys).unwrap();
@@ -539,9 +525,7 @@ mod tests {
#[test]
fn test_delete_missing_column() {
let intv = Arc::new(UInt64Vector::from_slice([1, 2, 3])) as VectorRef;
-
- let mut keys = HashMap::with_capacity(3);
- keys.insert("k1".to_string(), intv.clone());
+ let keys = HashMap::from([("k1".to_string(), intv)]);
let mut batch = new_test_batch();
let err = batch.delete(keys).unwrap_err();
@@ -552,11 +536,11 @@ mod tests {
fn test_delete_columns_more_than_row_key() {
let intv = Arc::new(UInt64Vector::from_slice([1, 2, 3])) as VectorRef;
let tsv = Arc::new(TimestampMillisecondVector::from_slice([0, 0, 0])) as VectorRef;
-
- let mut keys = HashMap::with_capacity(3);
- keys.insert("k1".to_string(), intv.clone());
- keys.insert("ts".to_string(), tsv);
- keys.insert("v2".to_string(), intv);
+ let keys = HashMap::from([
+ ("k1".to_string(), intv.clone()),
+ ("ts".to_string(), tsv),
+ ("v2".to_string(), intv),
+ ]);
let mut batch = new_test_batch();
let err = batch.delete(keys).unwrap_err();
@@ -567,10 +551,7 @@ mod tests {
fn test_delete_type_mismatch() {
let intv = Arc::new(UInt64Vector::from_slice([1, 2, 3])) as VectorRef;
let boolv = Arc::new(BooleanVector::from(vec![true, false, true])) as VectorRef;
-
- let mut keys = HashMap::with_capacity(3);
- keys.insert("k1".to_string(), intv.clone());
- keys.insert("ts".to_string(), boolv);
+ let keys = HashMap::from([("k1".to_string(), intv.clone()), ("ts".to_string(), boolv)]);
let mut batch = new_test_batch();
let err = batch.delete(keys).unwrap_err();
@@ -581,10 +562,7 @@ mod tests {
fn test_delete_non_null_value() {
let intv = Arc::new(UInt64Vector::from_slice([1, 2, 3])) as VectorRef;
let tsv = Arc::new(TimestampMillisecondVector::from_slice([0, 0, 0])) as VectorRef;
-
- let mut keys = HashMap::with_capacity(2);
- keys.insert("k1".to_string(), intv.clone());
- keys.insert("ts".to_string(), tsv);
+ let keys = HashMap::from([("k1".to_string(), intv.clone()), ("ts".to_string(), tsv)]);
let mut batch = write_batch_util::new_write_batch(
&[
diff --git a/src/storage/src/write_batch/codec.rs b/src/storage/src/write_batch/codec.rs
index c7aae66658e4..0bef6a9ec169 100644
--- a/src/storage/src/write_batch/codec.rs
+++ b/src/storage/src/write_batch/codec.rs
@@ -147,10 +147,11 @@ mod tests {
Arc::new(BooleanVector::from(vec![Some(true), Some(false), None])) as VectorRef;
let tsv = Arc::new(TimestampMillisecondVector::from_vec(vec![i, i, i])) as VectorRef;
- let mut put_data = HashMap::new();
- put_data.insert("k1".to_string(), intv.clone());
- put_data.insert("v1".to_string(), boolv);
- put_data.insert("ts".to_string(), tsv);
+ let put_data = HashMap::from([
+ ("k1".to_string(), intv),
+ ("v1".to_string(), boolv),
+ ("ts".to_string(), tsv),
+ ]);
batch.put(put_data).unwrap();
}
@@ -183,9 +184,8 @@ mod tests {
let intv = Arc::new(UInt64Vector::from_slice([1, 2, 3])) as VectorRef;
let tsv = Arc::new(TimestampMillisecondVector::from_vec(vec![0, 0, 0])) as VectorRef;
- let mut put_data = HashMap::with_capacity(3);
- put_data.insert("k1".to_string(), intv.clone());
- put_data.insert("ts".to_string(), tsv);
+ let put_data =
+ HashMap::from([("k1".to_string(), intv.clone()), ("ts".to_string(), tsv)]);
batch.put(put_data).unwrap();
}
diff --git a/src/storage/src/write_batch/compat.rs b/src/storage/src/write_batch/compat.rs
index 85a582ea4e3b..3031c53f8303 100644
--- a/src/storage/src/write_batch/compat.rs
+++ b/src/storage/src/write_batch/compat.rs
@@ -144,14 +144,9 @@ mod tests {
}
fn new_put_data() -> HashMap<String, VectorRef> {
- let mut put_data = HashMap::new();
let k0 = Arc::new(Int32Vector::from_slice([1, 2, 3])) as VectorRef;
let ts = Arc::new(TimestampMillisecondVector::from_values([11, 12, 13])) as VectorRef;
-
- put_data.insert("k0".to_string(), k0);
- put_data.insert("ts".to_string(), ts);
-
- put_data
+ HashMap::from([("k0".to_string(), k0), ("ts".to_string(), ts)])
}
#[test]
@@ -188,7 +183,7 @@ mod tests {
assert_eq!(schema_new, *batch.schema());
let mutation = &batch.payload().mutations[0];
- mutation.record_batch.column_by_name("v0").unwrap();
+ assert!(mutation.record_batch.column_by_name("v0").is_some());
}
#[test]
diff --git a/src/store-api/src/storage/descriptors.rs b/src/store-api/src/storage/descriptors.rs
index 38598b271b9d..fce7d3609190 100644
--- a/src/store-api/src/storage/descriptors.rs
+++ b/src/store-api/src/storage/descriptors.rs
@@ -218,11 +218,11 @@ mod tests {
.unwrap();
assert_eq!("A test column", desc.comment);
- new_column_desc_builder()
+ assert!(new_column_desc_builder()
.is_nullable(false)
.default_constraint(Some(ColumnDefaultConstraint::Value(Value::Null)))
.build()
- .unwrap_err();
+ .is_err());
}
#[test]
diff --git a/src/table-procedure/src/alter.rs b/src/table-procedure/src/alter.rs
index 4f46cbb6e86e..33f40503506c 100644
--- a/src/table-procedure/src/alter.rs
+++ b/src/table-procedure/src/alter.rs
@@ -245,7 +245,8 @@ impl AlterTableProcedure {
table_id,
};
- self.catalog_manager
+ let _ = self
+ .catalog_manager
.rename_table(rename_req)
.await
.map_err(Error::from_error_ext)?;
diff --git a/src/table-procedure/src/create.rs b/src/table-procedure/src/create.rs
index 3d1f41a29e44..e2cd331ca9ba 100644
--- a/src/table-procedure/src/create.rs
+++ b/src/table-procedure/src/create.rs
@@ -251,7 +251,8 @@ impl CreateTableProcedure {
table_id: self.data.request.id,
table,
};
- self.catalog_manager
+ let _ = self
+ .catalog_manager
.register_table(register_req)
.await
.map_err(Error::from_error_ext)?;
@@ -383,10 +384,9 @@ mod tests {
// Execute the subprocedure.
let mut subprocedure = subprocedures.pop().unwrap();
execute_procedure_until_done(&mut subprocedure.procedure).await;
- let mut states = HashMap::new();
- states.insert(subprocedure.id, ProcedureState::Done);
+ let states = HashMap::from([(subprocedure.id, ProcedureState::Done)]);
// Execute the parent procedure once.
- execute_procedure_once(
+ let _ = execute_procedure_once(
procedure_id,
MockContextProvider::new(states),
&mut procedure,
diff --git a/src/table-procedure/src/drop.rs b/src/table-procedure/src/drop.rs
index a796bea1f0b3..1c0367d9357d 100644
--- a/src/table-procedure/src/drop.rs
+++ b/src/table-procedure/src/drop.rs
@@ -122,7 +122,8 @@ impl DropTableProcedure {
async fn on_prepare(&mut self) -> Result<Status> {
let request = &self.data.request;
// Ensure the table exists.
- self.catalog_manager
+ let _ = self
+ .catalog_manager
.table(
&request.catalog_name,
&request.schema_name,
diff --git a/src/table-procedure/src/test_util.rs b/src/table-procedure/src/test_util.rs
index 7e3b028d0542..487457ac1645 100644
--- a/src/table-procedure/src/test_util.rs
+++ b/src/table-procedure/src/test_util.rs
@@ -53,7 +53,7 @@ impl TestEnv {
pub fn from_temp_dir(dir: TempDir) -> TestEnv {
let store_dir = format!("{}/db", dir.path().to_string_lossy());
let mut builder = Fs::default();
- builder.root(&store_dir);
+ let _ = builder.root(&store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
@@ -72,7 +72,7 @@ impl TestEnv {
let procedure_dir = format!("{}/procedure", dir.path().to_string_lossy());
let mut builder = Fs::default();
- builder.root(&procedure_dir);
+ let _ = builder.root(&procedure_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let config = ManagerConfig {
diff --git a/src/table/src/engine/manager.rs b/src/table/src/engine/manager.rs
index aefe99cfbe4b..f89046ce22c6 100644
--- a/src/table/src/engine/manager.rs
+++ b/src/table/src/engine/manager.rs
@@ -54,8 +54,7 @@ impl MemoryTableEngineManager {
/// Create a new [MemoryTableEngineManager] with single table `engine` and
/// an alias `name` instead of the engine's name.
pub fn alias(name: String, engine: TableEngineRef) -> Self {
- let mut engines = HashMap::new();
- engines.insert(name, engine);
+ let engines = HashMap::from([(name, engine)]);
let engines = RwLock::new(engines);
MemoryTableEngineManager {
@@ -104,7 +103,7 @@ impl TableEngineManager for MemoryTableEngineManager {
EngineExistSnafu { engine: name }
);
- engines.insert(name.to_string(), engine);
+ let _ = engines.insert(name.to_string(), engine);
Ok(())
}
@@ -149,9 +148,9 @@ mod tests {
let table_engine_manager = MemoryTableEngineManager::new(table_engine_ref.clone());
// Attach engine procedures.
- let mut engine_procedures = HashMap::new();
let engine_procedure: TableEngineProcedureRef = table_engine_ref.clone();
- engine_procedures.insert(table_engine_ref.name().to_string(), engine_procedure);
+ let engine_procedures =
+ HashMap::from([(table_engine_ref.name().to_string(), engine_procedure)]);
let table_engine_manager = table_engine_manager.with_engine_procedures(engine_procedures);
table_engine_manager
diff --git a/src/table/src/metadata.rs b/src/table/src/metadata.rs
index e857ddf498a2..5abdab5ad181 100644
--- a/src/table/src/metadata.rs
+++ b/src/table/src/metadata.rs
@@ -186,7 +186,7 @@ impl TableMeta {
// No need to rebuild table meta when renaming tables.
AlterKind::RenameTable { .. } => {
let mut meta_builder = TableMetaBuilder::default();
- meta_builder
+ let _ = meta_builder
.schema(self.schema.clone())
.primary_key_indices(self.primary_key_indices.clone())
.engine(self.engine.clone())
@@ -225,7 +225,7 @@ impl TableMeta {
fn new_meta_builder(&self) -> TableMetaBuilder {
let mut builder = TableMetaBuilder::default();
- builder
+ let _ = builder
.engine(&self.engine)
.engine_options(self.engine_options.clone())
.options(self.options.clone())
@@ -301,7 +301,7 @@ impl TableMeta {
})?;
// value_indices would be generated automatically.
- meta_builder
+ let _ = meta_builder
.schema(Arc::new(new_schema))
.primary_key_indices(primary_key_indices);
@@ -380,7 +380,7 @@ impl TableMeta {
.map(|name| new_schema.column_index_by_name(name).unwrap())
.collect();
- meta_builder
+ let _ = meta_builder
.schema(Arc::new(new_schema))
.primary_key_indices(primary_key_indices);
diff --git a/src/table/src/predicate.rs b/src/table/src/predicate.rs
index 4068e5189e85..be58015e5e03 100644
--- a/src/table/src/predicate.rs
+++ b/src/table/src/predicate.rs
@@ -325,7 +325,7 @@ mod tests {
let rb = RecordBatch::try_new(schema.clone(), vec![name_array, count_array]).unwrap();
writer.write(&rb).unwrap();
}
- writer.close().unwrap();
+ assert!(writer.close().is_ok());
(path, schema)
}
diff --git a/src/table/src/requests.rs b/src/table/src/requests.rs
index 7e76ad8ba288..532241a49e89 100644
--- a/src/table/src/requests.rs
+++ b/src/table/src/requests.rs
@@ -128,14 +128,14 @@ impl From<&TableOptions> for HashMap<String, String> {
fn from(opts: &TableOptions) -> Self {
let mut res = HashMap::with_capacity(2 + opts.extra_options.len());
if let Some(write_buffer_size) = opts.write_buffer_size {
- res.insert(
+ let _ = res.insert(
WRITE_BUFFER_SIZE_KEY.to_string(),
write_buffer_size.to_string(),
);
}
if let Some(ttl) = opts.ttl {
let ttl_str = humantime::format_duration(ttl).to_string();
- res.insert(TTL_KEY.to_string(), ttl_str);
+ let _ = res.insert(TTL_KEY.to_string(), ttl_str);
}
res.extend(
opts.extra_options
diff --git a/src/table/src/test_util/mock_engine.rs b/src/table/src/test_util/mock_engine.rs
index 741af85f6ae7..4850849fd9ab 100644
--- a/src/table/src/test_util/mock_engine.rs
+++ b/src/table/src/test_util/mock_engine.rs
@@ -53,7 +53,8 @@ impl TableEngine for MockTableEngine {
let table_ref = Arc::new(EmptyTable::new(request));
- self.tables
+ let _ = self
+ .tables
.lock()
.await
.insert((catalog_name, schema_name, table_name), table_ref.clone());
diff --git a/tests-integration/src/cluster.rs b/tests-integration/src/cluster.rs
index 6900fa5f0f95..7425f55a2d50 100644
--- a/tests-integration/src/cluster.rs
+++ b/tests-integration/src/cluster.rs
@@ -156,8 +156,8 @@ impl GreptimeDbClusterBuilder {
let dn_instance = self.create_datanode(&opts, meta_srv.clone()).await;
- instances.insert(datanode_id, dn_instance.0.clone());
- heartbeat_tasks.insert(datanode_id, dn_instance.1);
+ let _ = instances.insert(datanode_id, dn_instance.0.clone());
+ let _ = heartbeat_tasks.insert(datanode_id, dn_instance.1);
}
(instances, heartbeat_tasks, storage_guards, wal_guards)
}
@@ -266,7 +266,7 @@ async fn create_datanode_client(datanode_instance: Arc<DatanodeInstance>) -> (St
None,
runtime,
);
- tokio::spawn(async move {
+ let _handle = tokio::spawn(async move {
Server::builder()
.add_service(grpc_server.create_flight_service())
.add_service(grpc_server.create_database_service())
@@ -280,7 +280,7 @@ async fn create_datanode_client(datanode_instance: Arc<DatanodeInstance>) -> (St
// "127.0.0.1:3001" is just a placeholder, does not actually connect to it.
let addr = "127.0.0.1:3001";
let channel_manager = ChannelManager::new();
- channel_manager
+ let _ = channel_manager
.reset_with_connector(
addr,
service_fn(move |_| {
diff --git a/tests-integration/src/grpc.rs b/tests-integration/src/grpc.rs
index db16b171de99..5f997f2fe7c5 100644
--- a/tests-integration/src/grpc.rs
+++ b/tests-integration/src/grpc.rs
@@ -358,7 +358,7 @@ CREATE TABLE {table_name} (
table_id,
*region,
);
- has_parquet_file(®ion_dir);
+ assert!(has_parquet_file(®ion_dir));
}
}
diff --git a/tests-integration/src/instance.rs b/tests-integration/src/instance.rs
index 2fd3c3b6d307..21b1edd73bc6 100644
--- a/tests-integration/src/instance.rs
+++ b/tests-integration/src/instance.rs
@@ -253,7 +253,7 @@ mod tests {
query: &'a str,
_query_ctx: QueryContextRef,
) -> Result<Cow<'a, str>> {
- self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+ let _ = self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
assert!(query.starts_with("CREATE TABLE demo"));
Ok(Cow::Borrowed(query))
}
@@ -263,7 +263,7 @@ mod tests {
statements: Vec<Statement>,
_query_ctx: QueryContextRef,
) -> Result<Vec<Statement>> {
- self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+ let _ = self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
assert!(matches!(statements[0], Statement::CreateTable(_)));
Ok(statements)
}
@@ -274,7 +274,7 @@ mod tests {
_plan: Option<&query::plan::LogicalPlan>,
_query_ctx: QueryContextRef,
) -> Result<()> {
- self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+ let _ = self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
Ok(())
}
@@ -283,7 +283,7 @@ mod tests {
mut output: Output,
_query_ctx: QueryContextRef,
) -> Result<Output> {
- self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+ let _ = self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
match &mut output {
Output::AffectedRows(rows) => {
assert_eq!(*rows, 0);
diff --git a/tests-integration/src/table.rs b/tests-integration/src/table.rs
index 4a2d21a3f4cd..05b2ad30797e 100644
--- a/tests-integration/src/table.rs
+++ b/tests-integration/src/table.rs
@@ -24,7 +24,7 @@ mod test {
use common_meta::table_name::TableName;
use common_query::logical_plan::Expr;
use common_query::physical_plan::DfPhysicalPlanAdapter;
- use common_query::DfPhysicalPlan;
+ use common_query::{DfPhysicalPlan, Output};
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::RecordBatches;
use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec;
@@ -254,7 +254,7 @@ mod test {
for region_route in table_route.region_routes.iter() {
let region_id = region_route.region.id as u32;
let datanode_id = region_route.leader_peer.as_ref().unwrap().id;
- region_to_datanode_mapping.insert(region_id, datanode_id);
+ let _ = region_to_datanode_mapping.insert(region_id, datanode_id);
}
let mut global_start_ts = 1;
@@ -346,9 +346,10 @@ mod test {
let requests = InsertRequests {
inserts: vec![request],
};
- dn_instance
+ let Output::AffectedRows(x) = dn_instance
.handle_inserts(requests, &QueryContext::arc())
.await
- .unwrap();
+ .unwrap() else { unreachable!() };
+ assert_eq!(x as u32, row_count);
}
}
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index a6d6812989d0..611d611debda 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -130,7 +130,7 @@ pub fn get_test_store_config(
};
let mut builder = Azblob::default();
- builder
+ let _ = builder
.root(&azblob_config.root)
.endpoint(&azblob_config.endpoint)
.account_name(azblob_config.account_name.expose_secret())
@@ -138,7 +138,7 @@ pub fn get_test_store_config(
.container(&azblob_config.container);
if let Ok(sas_token) = env::var("GT_AZBLOB_SAS_TOKEN") {
- builder.sas_token(&sas_token);
+ let _ = builder.sas_token(&sas_token);
}
let config = ObjectStoreConfig::Azblob(azblob_config);
@@ -158,7 +158,7 @@ pub fn get_test_store_config(
};
let mut builder = Oss::default();
- builder
+ let _ = builder
.root(&oss_config.root)
.endpoint(&oss_config.endpoint)
.access_key_id(oss_config.access_key_id.expose_secret())
@@ -179,17 +179,17 @@ pub fn get_test_store_config(
}
let mut builder = S3::default();
- builder
+ let _ = builder
.root(&s3_config.root)
.access_key_id(s3_config.access_key_id.expose_secret())
.secret_access_key(s3_config.secret_access_key.expose_secret())
.bucket(&s3_config.bucket);
if s3_config.endpoint.is_some() {
- builder.endpoint(s3_config.endpoint.as_ref().unwrap());
+ let _ = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
}
if s3_config.region.is_some() {
- builder.region(s3_config.region.as_ref().unwrap());
+ let _ = builder.region(s3_config.region.as_ref().unwrap());
}
let config = ObjectStoreConfig::S3(s3_config);
@@ -315,7 +315,7 @@ pub async fn create_test_table(
table_id: table.table_info().ident.table_id,
table,
};
- catalog_manager.register_table(req).await.unwrap();
+ assert!(catalog_manager.register_table(req).await.is_ok());
Ok(())
}
@@ -390,22 +390,28 @@ pub async fn setup_test_http_app_with_frontend(
}
fn mock_insert_request(host: &str, cpu: f64, memory: f64, ts: i64) -> InsertRequest {
- let mut columns_values = HashMap::with_capacity(4);
let mut builder = StringVectorBuilder::with_capacity(1);
builder.push(Some(host));
- columns_values.insert("host".to_string(), builder.to_vector());
+ let host = builder.to_vector();
let mut builder = Float64VectorBuilder::with_capacity(1);
builder.push(Some(cpu));
- columns_values.insert("cpu".to_string(), builder.to_vector());
+ let cpu = builder.to_vector();
let mut builder = Float64VectorBuilder::with_capacity(1);
builder.push(Some(memory));
- columns_values.insert("memory".to_string(), builder.to_vector());
+ let memory = builder.to_vector();
let mut builder = TimestampMillisecondVectorBuilder::with_capacity(1);
builder.push(Some(ts.into()));
- columns_values.insert("ts".to_string(), builder.to_vector());
+ let ts = builder.to_vector();
+
+ let columns_values = HashMap::from([
+ ("host".to_string(), host),
+ ("cpu".to_string(), cpu),
+ ("memory".to_string(), memory),
+ ("ts".to_string(), ts),
+ ]);
InsertRequest {
catalog_name: common_catalog::consts::DEFAULT_CATALOG_NAME.to_string(),
@@ -573,7 +579,7 @@ pub async fn setup_mysql_server(
let fe_mysql_addr_clone = fe_mysql_addr.clone();
let fe_mysql_server_clone = fe_mysql_server.clone();
- tokio::spawn(async move {
+ let _handle = tokio::spawn(async move {
let addr = fe_mysql_addr_clone.parse::<SocketAddr>().unwrap();
fe_mysql_server_clone.start(addr).await.unwrap()
});
@@ -623,7 +629,7 @@ pub async fn setup_pg_server(
let fe_pg_addr_clone = fe_pg_addr.clone();
let fe_pg_server_clone = fe_pg_server.clone();
- tokio::spawn(async move {
+ let _handle = tokio::spawn(async move {
let addr = fe_pg_addr_clone.parse::<SocketAddr>().unwrap();
fe_pg_server_clone.start(addr).await.unwrap()
});
diff --git a/tests-integration/src/tests.rs b/tests-integration/src/tests.rs
index ca6bfaf60d31..cb465afecf3d 100644
--- a/tests-integration/src/tests.rs
+++ b/tests-integration/src/tests.rs
@@ -73,20 +73,20 @@ pub(crate) async fn create_standalone_instance(test_name: &str) -> MockStandalon
.await
.unwrap();
- dn_instance
+ assert!(dn_instance
.catalog_manager()
.register_catalog("another_catalog".to_string())
.await
- .unwrap();
+ .is_ok());
let req = RegisterSchemaRequest {
catalog: "another_catalog".to_string(),
schema: "another_schema".to_string(),
};
- dn_instance
+ assert!(dn_instance
.catalog_manager()
.register_schema(req)
.await
- .unwrap();
+ .is_ok());
dn_instance.start().await.unwrap();
if let Some(heartbeat) = heartbeat {
diff --git a/tests-integration/src/tests/instance_test.rs b/tests-integration/src/tests/instance_test.rs
index 8f975229b92c..e2a6a4110172 100644
--- a/tests-integration/src/tests/instance_test.rs
+++ b/tests-integration/src/tests/instance_test.rs
@@ -230,11 +230,14 @@ async fn test_execute_insert(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
// create table
- execute_sql(
- &instance,
- "create table demo(host string, cpu double, memory double, ts timestamp time index);",
- )
- .await;
+ assert!(matches!(
+ execute_sql(
+ &instance,
+ "create table demo(host string, cpu double, memory double, ts timestamp time index);",
+ )
+ .await,
+ Output::AffectedRows(0)
+ ));
let output = execute_sql(
&instance,
@@ -252,16 +255,22 @@ async fn test_execute_insert_by_select(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
// create table
- execute_sql(
- &instance,
- "create table demo1(host string, cpu double, memory double, ts timestamp time index);",
- )
- .await;
- execute_sql(
- &instance,
- "create table demo2(host string, cpu double, memory double, ts timestamp time index);",
- )
- .await;
+ assert!(matches!(
+ execute_sql(
+ &instance,
+ "create table demo1(host string, cpu double, memory double, ts timestamp time index);",
+ )
+ .await,
+ Output::AffectedRows(0)
+ ));
+ assert!(matches!(
+ execute_sql(
+ &instance,
+ "create table demo2(host string, cpu double, memory double, ts timestamp time index);",
+ )
+ .await,
+ Output::AffectedRows(0)
+ ));
let output = execute_sql(
&instance,
@@ -311,10 +320,10 @@ async fn test_execute_insert_by_select(instance: Arc<dyn MockInstance>) {
async fn test_execute_insert_query_with_i64_timestamp(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
- execute_sql(
+ assert!(matches!(execute_sql(
&instance,
"create table demo(host string, cpu double, memory double, ts bigint time index, primary key (host));",
- ).await;
+ ).await, Output::AffectedRows(0)));
let output = execute_sql(
&instance,
@@ -419,10 +428,10 @@ async fn test_execute_show_databases_tables(instance: Arc<dyn MockInstance>) {
let output = execute_sql(&instance, "show tables").await;
check_unordered_output_stream(output, expected).await;
- execute_sql(
+ assert!(matches!(execute_sql(
&instance,
"create table demo(host string, cpu double, memory double, ts timestamp time index, primary key (host));",
- ).await;
+ ).await, Output::AffectedRows(0)));
let output = execute_sql(&instance, "show tables").await;
let expected = "\
@@ -806,9 +815,11 @@ async fn test_rename_table(instance: Arc<dyn MockInstance>) {
+-------+-----+--------+---------------------+";
check_output_stream(output, expected).await;
- try_execute_sql_with(&instance, "select * from demo", query_ctx)
- .await
- .expect_err("no table found in expect");
+ assert!(
+ try_execute_sql_with(&instance, "select * from demo", query_ctx)
+ .await
+ .is_err()
+ );
}
// should apply to both instances. tracked in #723
@@ -867,18 +878,24 @@ async fn test_alter_table(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
// create table
- execute_sql(
- &instance,
- "create table demo(host string, cpu double, memory double, ts timestamp time index);",
- )
- .await;
+ assert!(matches!(
+ execute_sql(
+ &instance,
+ "create table demo(host string, cpu double, memory double, ts timestamp time index);",
+ )
+ .await,
+ Output::AffectedRows(0)
+ ));
// make sure table insertion is ok before altering table
- execute_sql(
- &instance,
- "insert into demo(host, cpu, memory, ts) values ('host1', 1.1, 100, 1000)",
- )
- .await;
+ assert!(matches!(
+ execute_sql(
+ &instance,
+ "insert into demo(host, cpu, memory, ts) values ('host1', 1.1, 100, 1000)",
+ )
+ .await,
+ Output::AffectedRows(0)
+ ));
// Add column
let output = execute_sql(&instance, "alter table demo add my_tag string null").await;
@@ -1100,11 +1117,11 @@ async fn test_execute_copy_to_s3(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
// setups
- execute_sql(
+ assert!(matches!(execute_sql(
&instance,
"create table demo(host string, cpu double, memory double, ts timestamp time index);",
)
- .await;
+ .await, Output::AffectedRows(0)));
let output = execute_sql(
&instance,
@@ -1138,11 +1155,11 @@ async fn test_execute_copy_from_s3(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
// setups
- execute_sql(
+ assert!(matches!(execute_sql(
&instance,
"create table demo(host string, cpu double, memory double, ts timestamp time index);",
)
- .await;
+ .await, Output::AffectedRows(0)));
let output = execute_sql(
&instance,
@@ -1192,14 +1209,17 @@ async fn test_execute_copy_from_s3(instance: Arc<dyn MockInstance>) {
for test in tests {
// import
- execute_sql(
- &instance,
- &format!(
+ assert!(matches!(
+ execute_sql(
+ &instance,
+ &format!(
"create table {}(host string, cpu double, memory double, ts timestamp time index);",
test.table_name
),
- )
- .await;
+ )
+ .await,
+ Output::AffectedRows(0)
+ ));
let sql = format!(
"{} CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',REGION='{}')",
test.sql, key_id, key, region,
@@ -1233,11 +1253,11 @@ async fn test_execute_copy_from_orc(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
// setups
- execute_sql(
+ assert!(matches!(execute_sql(
&instance,
"create table demo(double_a double, a float, b boolean, str_direct string, d string, e string, f string, int_short_repeated int, int_neg_short_repeated int, int_delta int, int_neg_delta int, int_direct int, int_neg_direct int, bigint_direct bigint, bigint_neg_direct bigint, bigint_other bigint, utf8_increase string, utf8_decrease string, timestamp_simple timestamp(9) time index, date_simple date);",
)
- .await;
+ .await, Output::AffectedRows(0)));
let filepath = get_data_dir("../src/common/datasource/tests/orc/test.orc")
.canonicalize()
@@ -1271,11 +1291,11 @@ async fn test_cast_type_issue_1594(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
// setups
- execute_sql(
+ assert!(matches!(execute_sql(
&instance,
"create table tsbs_cpu(hostname STRING, environment STRING, usage_user DOUBLE, usage_system DOUBLE, usage_idle DOUBLE, usage_nice DOUBLE, usage_iowait DOUBLE, usage_irq DOUBLE, usage_softirq DOUBLE, usage_steal DOUBLE, usage_guest DOUBLE, usage_guest_nice DOUBLE, ts TIMESTAMP TIME INDEX, PRIMARY KEY(hostname));",
)
- .await;
+ .await, Output::AffectedRows(0)));
let filepath = get_data_dir("../src/common/datasource/tests/csv/type_cast.csv")
.canonicalize()
.unwrap()
diff --git a/tests-integration/src/tests/promql_test.rs b/tests-integration/src/tests/promql_test.rs
index 8cb798d0671c..d4533c17deb6 100644
--- a/tests-integration/src/tests/promql_test.rs
+++ b/tests-integration/src/tests/promql_test.rs
@@ -37,8 +37,8 @@ async fn create_insert_query_assert(
lookback: Duration,
expected: &str,
) {
- instance.do_query(create, QueryContext::arc()).await;
- instance.do_query(insert, QueryContext::arc()).await;
+ let _ = instance.do_query(create, QueryContext::arc()).await;
+ let _ = instance.do_query(insert, QueryContext::arc()).await;
let query = PromQuery {
query: promql.to_string(),
@@ -66,8 +66,8 @@ async fn create_insert_tql_assert(
tql: &str,
expected: &str,
) {
- instance.do_query(create, QueryContext::arc()).await;
- instance.do_query(insert, QueryContext::arc()).await;
+ let _ = instance.do_query(create, QueryContext::arc()).await;
+ let _ = instance.do_query(insert, QueryContext::arc()).await;
let query_output = instance
.do_query(tql, QueryContext::arc())
diff --git a/tests-integration/tests/grpc.rs b/tests-integration/tests/grpc.rs
index b9d59f289d3f..2145f809fc5b 100644
--- a/tests-integration/tests/grpc.rs
+++ b/tests-integration/tests/grpc.rs
@@ -347,12 +347,18 @@ pub async fn test_prom_gateway_query(store_type: StorageType) {
let mut gateway_client = grpc_client.make_prometheus_gateway_client().unwrap();
// create table and insert data
- db.sql("CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY);")
- .await
- .unwrap();
- db.sql(r#"INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");"#)
- .await
- .unwrap();
+ assert!(matches!(
+ db.sql("CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY);")
+ .await
+ .unwrap(),
+ Output::AffectedRows(0)
+ ));
+ assert!(matches!(
+ db.sql(r#"INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");"#)
+ .await
+ .unwrap(),
+ Output::AffectedRows(3)
+ ));
// Instant query using prometheus gateway service
let header = RequestHeader {
diff --git a/tests-integration/tests/sql.rs b/tests-integration/tests/sql.rs
index 313dbff53f94..db1496647c1d 100644
--- a/tests-integration/tests/sql.rs
+++ b/tests-integration/tests/sql.rs
@@ -63,18 +63,18 @@ pub async fn test_mysql_crud(store_type: StorageType) {
.await
.unwrap();
- sqlx::query(
+ assert!(sqlx::query(
"create table demo(i bigint, ts timestamp time index, d date, dt datetime, b blob)",
)
.execute(&pool)
.await
- .unwrap();
+ .is_ok());
for i in 0..10 {
let dt = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp_opt(60, i).unwrap(), Utc);
let d = NaiveDate::from_yo_opt(2015, 100).unwrap();
let hello = format!("hello{i}");
let bytes = hello.as_bytes();
- sqlx::query("insert into demo values(?, ?, ?, ?, ?)")
+ assert!(sqlx::query("insert into demo values(?, ?, ?, ?, ?)")
.bind(i)
.bind(i)
.bind(d)
@@ -82,7 +82,7 @@ pub async fn test_mysql_crud(store_type: StorageType) {
.bind(bytes)
.execute(&pool)
.await
- .unwrap();
+ .is_ok());
}
let rows = sqlx::query("select i, d, dt, b from demo")
@@ -122,10 +122,7 @@ pub async fn test_mysql_crud(store_type: StorageType) {
assert_eq!(ret, 6);
}
- sqlx::query("delete from demo")
- .execute(&pool)
- .await
- .unwrap();
+ assert!(sqlx::query("delete from demo").execute(&pool).await.is_ok());
let rows = sqlx::query("select i from demo")
.fetch_all(&pool)
.await
@@ -145,17 +142,19 @@ pub async fn test_postgres_crud(store_type: StorageType) {
.await
.unwrap();
- sqlx::query("create table demo(i bigint, ts timestamp time index)")
- .execute(&pool)
- .await
- .unwrap();
+ assert!(
+ sqlx::query("create table demo(i bigint, ts timestamp time index)")
+ .execute(&pool)
+ .await
+ .is_ok()
+ );
for i in 0..10 {
- sqlx::query("insert into demo values($1, $2)")
+ assert!(sqlx::query("insert into demo values($1, $2)")
.bind(i)
.bind(i)
.execute(&pool)
.await
- .unwrap();
+ .is_ok());
}
let rows = sqlx::query("select i from demo")
@@ -181,10 +180,7 @@ pub async fn test_postgres_crud(store_type: StorageType) {
assert_eq!(ret, 6);
}
- sqlx::query("delete from demo")
- .execute(&pool)
- .await
- .unwrap();
+ assert!(sqlx::query("delete from demo").execute(&pool).await.is_ok());
let rows = sqlx::query("select i from demo")
.fetch_all(&pool)
.await
diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index 875e266da855..2633c6a0e169 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -367,7 +367,7 @@ impl GreptimeDBContext {
}
fn incr_datanode_id(&self) {
- self.datanode_id.fetch_add(1, Ordering::Relaxed);
+ let _ = self.datanode_id.fetch_add(1, Ordering::Relaxed);
}
fn datanode_id(&self) -> u32 {
diff --git a/tests/runner/src/util.rs b/tests/runner/src/util.rs
index 47cb84eb3521..a052b6e3e34b 100644
--- a/tests/runner/src/util.rs
+++ b/tests/runner/src/util.rs
@@ -67,7 +67,7 @@ pub fn get_case_dir() -> String {
let mut runner_crate_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
// change directory to cases' dir from runner's (should be runner/../cases)
- runner_crate_path.pop();
+ let _ = runner_crate_path.pop();
runner_crate_path.push("cases");
runner_crate_path.into_os_string().into_string().unwrap()
@@ -79,8 +79,8 @@ pub fn get_workspace_root() -> String {
let mut runner_crate_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
// change directory to workspace's root (runner/../..)
- runner_crate_path.pop();
- runner_crate_path.pop();
+ let _ = runner_crate_path.pop();
+ let _ = runner_crate_path.pop();
runner_crate_path.into_os_string().into_string().unwrap()
}
|
chore
|
deny unused results (#1825)
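The pattern running through the whole diff above — binding otherwise-ignored return values to `let _ = ...`, or asserting on them when a test actually cares about the outcome — is what a crate-level `unused_results` lint enforces (the exact lint configuration used by the PR is an assumption here). A minimal, self-contained sketch using only std types, not GreptimeDB code:

#![deny(unused_results)]

use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, Ordering};

fn main() {
    let mut map = HashMap::new();

    // `HashMap::insert` returns the previously stored value; with
    // `unused_results` denied, dropping it silently is a compile error,
    // so the diff discards it explicitly.
    let _ = map.insert("region", 42u32);

    // When the result matters to the test, assert on it instead of ignoring it.
    assert!(map.insert("datanode", 7u32).is_none());

    // Same idea for atomics: `fetch_add` returns the previous value.
    let counter = AtomicU64::new(0);
    let _ = counter.fetch_add(1, Ordering::Relaxed);
    assert_eq!(counter.load(Ordering::Relaxed), 1);
}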
|
20a933e3951b06bc1ecb65a3f02f4261d293aa69
|
2024-04-24 09:59:41
|
Weny Xu
|
refactor: simplify the PaginationStream (#3787)
| false
|
diff --git a/src/cmd/src/cli/upgrade.rs b/src/cmd/src/cli/upgrade.rs
index 0aa787fbe965..8d1be2c3c554 100644
--- a/src/cmd/src/cli/upgrade.rs
+++ b/src/cmd/src/cli/upgrade.rs
@@ -192,10 +192,10 @@ impl MigrateTableMetadata {
let key = v1SchemaKey::parse(key_str)
.unwrap_or_else(|e| panic!("schema key is corrupted: {e}, key: {key_str}"));
- Ok((key, ()))
+ Ok(key)
}),
);
- while let Some((key, _)) = stream.try_next().await.context(error::IterStreamSnafu)? {
+ while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
let _ = self.migrate_schema_key(&key).await;
keys.push(key.to_string().as_bytes().to_vec());
}
@@ -244,10 +244,10 @@ impl MigrateTableMetadata {
let key = v1CatalogKey::parse(key_str)
.unwrap_or_else(|e| panic!("catalog key is corrupted: {e}, key: {key_str}"));
- Ok((key, ()))
+ Ok(key)
}),
);
- while let Some((key, _)) = stream.try_next().await.context(error::IterStreamSnafu)? {
+ while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
let _ = self.migrate_catalog_key(&key).await;
keys.push(key.to_string().as_bytes().to_vec());
}
diff --git a/src/common/meta/src/key/catalog_name.rs b/src/common/meta/src/key/catalog_name.rs
index 4bbfb367b9f4..63873177b1b7 100644
--- a/src/common/meta/src/key/catalog_name.rs
+++ b/src/common/meta/src/key/catalog_name.rs
@@ -17,7 +17,6 @@ use std::sync::Arc;
use common_catalog::consts::DEFAULT_CATALOG_NAME;
use futures::stream::BoxStream;
-use futures::StreamExt;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
@@ -84,11 +83,11 @@ impl<'a> TryFrom<&'a str> for CatalogNameKey<'a> {
}
/// Decodes `KeyValue` to ({catalog},())
-pub fn catalog_decoder(kv: KeyValue) -> Result<(String, ())> {
+pub fn catalog_decoder(kv: KeyValue) -> Result<String> {
let str = std::str::from_utf8(&kv.key).context(error::ConvertRawKeySnafu)?;
let catalog_name = CatalogNameKey::try_from(str)?;
- Ok((catalog_name.catalog.to_string(), ()))
+ Ok(catalog_name.catalog.to_string())
}
pub struct CatalogManager {
@@ -134,7 +133,7 @@ impl CatalogManager {
Arc::new(catalog_decoder),
);
- Box::pin(stream.map(|kv| kv.map(|kv| kv.0)))
+ Box::pin(stream)
}
}
diff --git a/src/common/meta/src/key/datanode_table.rs b/src/common/meta/src/key/datanode_table.rs
index aa63eef09e1f..96bebb74662e 100644
--- a/src/common/meta/src/key/datanode_table.rs
+++ b/src/common/meta/src/key/datanode_table.rs
@@ -16,7 +16,6 @@ use std::collections::HashMap;
use std::sync::Arc;
use futures::stream::BoxStream;
-use futures::StreamExt;
use serde::{Deserialize, Serialize};
use snafu::OptionExt;
use store_api::storage::RegionNumber;
@@ -126,10 +125,8 @@ impl DatanodeTableValue {
}
/// Decodes `KeyValue` to ((),`DatanodeTableValue`)
-pub fn datanode_table_value_decoder(kv: KeyValue) -> Result<((), DatanodeTableValue)> {
- let value = DatanodeTableValue::try_from_raw_value(&kv.value)?;
-
- Ok(((), value))
+pub fn datanode_table_value_decoder(kv: KeyValue) -> Result<DatanodeTableValue> {
+ DatanodeTableValue::try_from_raw_value(&kv.value)
}
pub struct DatanodeTableManager {
@@ -163,7 +160,7 @@ impl DatanodeTableManager {
Arc::new(datanode_table_value_decoder),
);
- Box::pin(stream.map(|kv| kv.map(|kv| kv.1)))
+ Box::pin(stream)
}
/// Builds the create datanode table transactions. It only executes when the primary key comparison succeeds.
diff --git a/src/common/meta/src/key/schema_name.rs b/src/common/meta/src/key/schema_name.rs
index 17fdb6a4c220..f56adbaec440 100644
--- a/src/common/meta/src/key/schema_name.rs
+++ b/src/common/meta/src/key/schema_name.rs
@@ -19,7 +19,6 @@ use std::time::Duration;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use futures::stream::BoxStream;
-use futures::StreamExt;
use humantime_serde::re::humantime;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
@@ -103,11 +102,11 @@ impl TableMetaKey for SchemaNameKey<'_> {
}
/// Decodes `KeyValue` to ({schema},())
-pub fn schema_decoder(kv: KeyValue) -> Result<(String, ())> {
+pub fn schema_decoder(kv: KeyValue) -> Result<String> {
let str = std::str::from_utf8(&kv.key).context(error::ConvertRawKeySnafu)?;
let schema_name = SchemaNameKey::try_from(str)?;
- Ok((schema_name.schema.to_string(), ()))
+ Ok(schema_name.schema.to_string())
}
impl<'a> TryFrom<&'a str> for SchemaNameKey<'a> {
@@ -193,7 +192,7 @@ impl SchemaManager {
Arc::new(schema_decoder),
);
- Box::pin(stream.map(|kv| kv.map(|kv| kv.0)))
+ Box::pin(stream)
}
}
diff --git a/src/common/meta/src/range_stream.rs b/src/common/meta/src/range_stream.rs
index 878d5c0c9f24..b14a2bf8f1b4 100644
--- a/src/common/meta/src/range_stream.rs
+++ b/src/common/meta/src/range_stream.rs
@@ -28,13 +28,13 @@ use crate::rpc::store::{RangeRequest, RangeResponse};
use crate::rpc::KeyValue;
use crate::util::get_next_prefix_key;
-pub type KeyValueDecoderFn<K, V> = dyn Fn(KeyValue) -> Result<(K, V)> + Send + Sync;
+pub type KeyValueDecoderFn<T> = dyn Fn(KeyValue) -> Result<T> + Send + Sync;
-enum PaginationStreamState<K, V> {
+enum PaginationStreamState<T> {
/// At the start of reading.
Init,
/// Decoding key value pairs.
- Decoding(SimpleKeyValueDecoder<K, V>),
+ Decoding(SimpleKeyValueDecoder<T>),
/// Retrieving data from backend.
Reading(BoxFuture<'static, Result<(PaginationStreamFactory, Option<RangeResponse>)>>),
/// Error
@@ -77,7 +77,7 @@ struct PaginationStreamFactory {
}
impl PaginationStreamFactory {
- pub fn new(
+ fn new(
kv: &KvBackendRef,
key: Vec<u8>,
range_end: Vec<u8>,
@@ -137,7 +137,7 @@ impl PaginationStreamFactory {
}
}
- pub async fn read_next(mut self) -> Result<(Self, Option<RangeResponse>)> {
+ async fn read_next(mut self) -> Result<(Self, Option<RangeResponse>)> {
if self.more {
let resp = self
.adaptive_range(RangeRequest {
@@ -174,18 +174,19 @@ impl PaginationStreamFactory {
}
}
-pub struct PaginationStream<K, V> {
- state: PaginationStreamState<K, V>,
- decoder_fn: Arc<KeyValueDecoderFn<K, V>>,
+pub struct PaginationStream<T> {
+ state: PaginationStreamState<T>,
+ decoder_fn: Arc<KeyValueDecoderFn<T>>,
factory: Option<PaginationStreamFactory>,
}
-impl<K, V> PaginationStream<K, V> {
+impl<T> PaginationStream<T> {
+ /// Returns a new [PaginationStream].
pub fn new(
kv: KvBackendRef,
req: RangeRequest,
page_size: usize,
- decoder_fn: Arc<KeyValueDecoderFn<K, V>>,
+ decoder_fn: Arc<KeyValueDecoderFn<T>>,
) -> Self {
Self {
state: PaginationStreamState::Init,
@@ -202,13 +203,13 @@ impl<K, V> PaginationStream<K, V> {
}
}
-struct SimpleKeyValueDecoder<K, V> {
+struct SimpleKeyValueDecoder<T> {
kv: VecDeque<KeyValue>,
- decoder: Arc<KeyValueDecoderFn<K, V>>,
+ decoder: Arc<KeyValueDecoderFn<T>>,
}
-impl<K, V> Iterator for SimpleKeyValueDecoder<K, V> {
- type Item = Result<(K, V)>;
+impl<T> Iterator for SimpleKeyValueDecoder<T> {
+ type Item = Result<T>;
fn next(&mut self) -> Option<Self::Item> {
if let Some(kv) = self.kv.pop_front() {
@@ -219,8 +220,8 @@ impl<K, V> Iterator for SimpleKeyValueDecoder<K, V> {
}
}
-impl<K, V> Stream for PaginationStream<K, V> {
- type Item = Result<(K, V)>;
+impl<T> Stream for PaginationStream<T> {
+ type Item = Result<T>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
loop {
diff --git a/src/meta-srv/src/service/store/cached_kv.rs b/src/meta-srv/src/service/store/cached_kv.rs
index 8772c47a1d70..8bf02dc0471e 100644
--- a/src/meta-srv/src/service/store/cached_kv.rs
+++ b/src/meta-srv/src/service/store/cached_kv.rs
@@ -102,15 +102,10 @@ impl LeaderCachedKvBackend {
self.store.clone(),
RangeRequest::new().with_prefix(prefix.as_bytes()),
DEFAULT_PAGE_SIZE,
- Arc::new(|kv| Ok((kv, ()))),
+ Arc::new(Ok),
);
- let kvs = stream
- .try_collect::<Vec<_>>()
- .await?
- .into_iter()
- .map(|(kv, _)| kv)
- .collect();
+ let kvs = stream.try_collect::<Vec<_>>().await?.into_iter().collect();
self.cache
.batch_put(BatchPutRequest {
|
refactor
|
simplify the PaginationStream (#3787)
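The gist of the simplification above — one decoder type parameter `T` instead of a `(K, V)` pair, so callers no longer strip a useless `()` slot with `.map(|kv| kv.0)` — can be sketched with a plain iterator. `RawKv`, `DecoderFn`, and `decode_name` below are illustrative stand-ins, not the real GreptimeDB types:

use std::sync::Arc;

// Stand-in for a raw key-value pair coming back from the kv backend.
struct RawKv {
    key: Vec<u8>,
    #[allow(dead_code)]
    value: Vec<u8>,
}

// A single decoder produces exactly the item type the caller wants.
type DecoderFn<T> = dyn Fn(RawKv) -> Result<T, String> + Send + Sync;

struct DecodedIter<T> {
    raw: Vec<RawKv>,
    decoder: Arc<DecoderFn<T>>,
}

impl<T> Iterator for DecodedIter<T> {
    type Item = Result<T, String>;

    fn next(&mut self) -> Option<Self::Item> {
        // Pop the next raw pair and run it through the decoder.
        self.raw.pop().map(|kv| (self.decoder)(kv))
    }
}

fn decode_name(kv: RawKv) -> Result<String, String> {
    String::from_utf8(kv.key).map_err(|e| e.to_string())
}

fn main() {
    let iter: DecodedIter<String> = DecodedIter {
        raw: vec![RawKv { key: b"greptime_public".to_vec(), value: vec![] }],
        decoder: Arc::new(decode_name),
    };
    // The iterator yields `String` directly; no `(String, ())` tuples to unwrap.
    for name in iter {
        println!("{}", name.unwrap());
    }
}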
|
35ba0868b51fd06d2696744396b87e54431eb190
|
2022-09-21 09:17:55
|
Lei, Huang
|
feat: impl filter push down to parquet reader (#262)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index dafdb2e3b6f5..ab6ecd3918db 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4897,6 +4897,7 @@ dependencies = [
"bytes",
"common-base",
"common-error",
+ "common-query",
"common-runtime",
"common-telemetry",
"common-time",
@@ -4916,6 +4917,7 @@ dependencies = [
"serde_json",
"snafu",
"store-api",
+ "table",
"tempdir",
"tokio",
"tonic",
@@ -4932,6 +4934,7 @@ dependencies = [
"bytes",
"common-base",
"common-error",
+ "common-query",
"common-time",
"datatypes",
"derive_builder",
@@ -5090,14 +5093,21 @@ dependencies = [
"common-error",
"common-query",
"common-recordbatch",
+ "common-telemetry",
"datafusion",
"datafusion-common",
+ "datafusion-expr",
"datatypes",
"derive_builder",
"futures",
+ "parquet-format-async-temp",
+ "paste",
"serde",
"snafu",
"store-api",
+ "tempdir",
+ "tokio",
+ "tokio-util",
]
[[package]]
@@ -5447,6 +5457,7 @@ checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740"
dependencies = [
"bytes",
"futures-core",
+ "futures-io",
"futures-sink",
"pin-project-lite",
"tokio",
diff --git a/src/query/src/datafusion/plan_adapter.rs b/src/query/src/datafusion/plan_adapter.rs
index 69852fb7e7ee..d71f2b9f6f6e 100644
--- a/src/query/src/datafusion/plan_adapter.rs
+++ b/src/query/src/datafusion/plan_adapter.rs
@@ -1,5 +1,5 @@
use std::any::Any;
-use std::fmt::{self, Debug};
+use std::fmt::Debug;
use std::sync::Arc;
use common_recordbatch::SendableRecordBatchStream;
@@ -22,6 +22,7 @@ use crate::executor::Runtime;
use crate::plan::{Partitioning, PhysicalPlan};
/// Datafusion ExecutionPlan -> greptime PhysicalPlan
+#[derive(Debug)]
pub struct PhysicalPlanAdapter {
plan: Arc<dyn ExecutionPlan>,
schema: SchemaRef,
@@ -109,18 +110,12 @@ impl PhysicalPlan for PhysicalPlanAdapter {
}
/// Greptime PhysicalPlan -> datafusion ExecutionPlan.
+#[derive(Debug)]
struct ExecutionPlanAdapter {
plan: Arc<dyn PhysicalPlan>,
schema: SchemaRef,
}
-impl Debug for ExecutionPlanAdapter {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- //TODO(dennis) better debug info
- write!(f, "ExecutionPlan(PlaceHolder)")
- }
-}
-
#[async_trait::async_trait]
impl ExecutionPlan for ExecutionPlanAdapter {
fn as_any(&self) -> &dyn Any {
@@ -182,3 +177,39 @@ impl ExecutionPlan for ExecutionPlanAdapter {
Statistics::default()
}
}
+
+#[cfg(test)]
+mod tests {
+ use arrow::datatypes::Field;
+ use datafusion::physical_plan::empty::EmptyExec;
+ use datafusion_common::field_util::SchemaExt;
+ use datatypes::schema::Schema;
+
+ use super::*;
+
+ #[test]
+ fn test_physical_plan_adapter() {
+ let arrow_schema = arrow::datatypes::Schema::new(vec![Field::new(
+ "name",
+ arrow::datatypes::DataType::Utf8,
+ true,
+ )]);
+
+ let schema = Arc::new(Schema::try_from(arrow_schema.clone()).unwrap());
+ let physical_plan = PhysicalPlanAdapter::new(
+ schema.clone(),
+ Arc::new(EmptyExec::new(true, Arc::new(arrow_schema))),
+ );
+
+ assert!(physical_plan
+ .plan
+ .as_any()
+ .downcast_ref::<EmptyExec>()
+ .is_some());
+ let execution_plan_adapter = ExecutionPlanAdapter {
+ plan: Arc::new(physical_plan),
+ schema: schema.clone(),
+ };
+ assert_eq!(schema, execution_plan_adapter.schema);
+ }
+}
diff --git a/src/query/src/plan.rs b/src/query/src/plan.rs
index 9a1b98a94533..d26c0cfa739a 100644
--- a/src/query/src/plan.rs
+++ b/src/query/src/plan.rs
@@ -1,4 +1,5 @@
use std::any::Any;
+use std::fmt::Debug;
use std::sync::Arc;
use common_recordbatch::SendableRecordBatchStream;
@@ -39,7 +40,7 @@ impl Partitioning {
}
#[async_trait::async_trait]
-pub trait PhysicalPlan: Send + Sync + Any {
+pub trait PhysicalPlan: Send + Sync + Any + Debug {
/// Get the schema for this execution plan
fn schema(&self) -> SchemaRef;
diff --git a/src/storage/Cargo.toml b/src/storage/Cargo.toml
index bb3e83149c23..0fda93c14179 100644
--- a/src/storage/Cargo.toml
+++ b/src/storage/Cargo.toml
@@ -12,6 +12,7 @@ async-trait = "0.1"
bytes = "1.1"
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
+common-query = { path = "../common/query" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
@@ -29,6 +30,7 @@ serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
store-api = { path = "../store-api" }
+table = { path = "../table" }
tokio = { version = "1.18", features = ["full"] }
tonic = "0.8"
uuid = { version = "1.1", features = ["v4"] }
diff --git a/src/storage/src/chunk.rs b/src/storage/src/chunk.rs
index 8ca8f23b2e71..3a94fb34d53b 100644
--- a/src/storage/src/chunk.rs
+++ b/src/storage/src/chunk.rs
@@ -1,8 +1,10 @@
use std::sync::Arc;
use async_trait::async_trait;
+use common_query::logical_plan::Expr;
use snafu::ResultExt;
use store_api::storage::{Chunk, ChunkReader, SchemaRef, SequenceNumber};
+use table::predicate::Predicate;
use crate::error::{self, Error, Result};
use crate::memtable::{IterContext, MemtableRef, MemtableSet};
@@ -49,6 +51,7 @@ impl ChunkReaderImpl {
pub struct ChunkReaderBuilder {
schema: RegionSchemaRef,
projection: Option<Vec<usize>>,
+ filters: Vec<Expr>,
sst_layer: AccessLayerRef,
iter_ctx: IterContext,
memtables: Vec<MemtableRef>,
@@ -60,6 +63,7 @@ impl ChunkReaderBuilder {
ChunkReaderBuilder {
schema,
projection: None,
+ filters: vec![],
sst_layer,
iter_ctx: IterContext::default(),
memtables: Vec::new(),
@@ -78,6 +82,11 @@ impl ChunkReaderBuilder {
self
}
+ pub fn filters(mut self, filters: Vec<Expr>) -> Self {
+ self.filters = filters;
+ self
+ }
+
pub fn batch_size(mut self, batch_size: usize) -> Self {
self.iter_ctx.batch_size = batch_size;
self
@@ -121,6 +130,7 @@ impl ChunkReaderBuilder {
let read_opts = ReadOptions {
batch_size: self.iter_ctx.batch_size,
projected_schema: schema.clone(),
+ predicate: Predicate::new(self.filters),
};
for file in &self.files_to_read {
let reader = self
diff --git a/src/storage/src/snapshot.rs b/src/storage/src/snapshot.rs
index ac3f343ff633..110573100f31 100644
--- a/src/storage/src/snapshot.rs
+++ b/src/storage/src/snapshot.rs
@@ -43,6 +43,7 @@ impl Snapshot for SnapshotImpl {
ChunkReaderBuilder::new(self.version.schema().clone(), self.sst_layer.clone())
.reserve_num_memtables(memtable_version.num_memtables())
.projection(request.projection)
+ .filters(request.filters)
.batch_size(ctx.batch_size)
.visible_sequence(visible_sequence)
.pick_memtables(mutables);
diff --git a/src/storage/src/sst.rs b/src/storage/src/sst.rs
index a04bc6d64227..3d2123bdb6d2 100644
--- a/src/storage/src/sst.rs
+++ b/src/storage/src/sst.rs
@@ -5,6 +5,7 @@ use std::sync::Arc;
use async_trait::async_trait;
use object_store::{util, ObjectStore};
use serde::{Deserialize, Serialize};
+use table::predicate::Predicate;
use crate::error::Result;
use crate::memtable::BoxedBatchIterator;
@@ -170,13 +171,14 @@ pub struct WriteOptions {
// TODO(yingwen): [flush] row group size.
}
-#[derive(Debug)]
pub struct ReadOptions {
/// Suggested size of each batch.
pub batch_size: usize,
/// The schema that the user expects to read, which might not be the same as the
/// schema of the SST file.
pub projected_schema: ProjectedSchemaRef,
+
+ pub predicate: Predicate,
}
/// SST access layer.
@@ -240,6 +242,7 @@ impl AccessLayer for FsAccessLayer {
&file_path,
self.object_store.clone(),
opts.projected_schema.clone(),
+ opts.predicate.clone(),
);
let stream = reader.chunk_stream(opts.batch_size).await?;
diff --git a/src/storage/src/sst/parquet.rs b/src/storage/src/sst/parquet.rs
index 3dcfce72917d..afa3d1f8605d 100644
--- a/src/storage/src/sst/parquet.rs
+++ b/src/storage/src/sst/parquet.rs
@@ -6,6 +6,7 @@ use std::sync::Arc;
use async_stream::try_stream;
use async_trait::async_trait;
+use common_telemetry::debug;
use datatypes::arrow::array::Array;
use datatypes::arrow::chunk::Chunk;
use datatypes::arrow::datatypes::{DataType, Field, Schema};
@@ -19,6 +20,7 @@ use futures_util::sink::SinkExt;
use futures_util::{Stream, TryStreamExt};
use object_store::{ObjectStore, SeekableReader};
use snafu::ResultExt;
+use table::predicate::Predicate;
use crate::error::{self, Result};
use crate::memtable::BoxedBatchIterator;
@@ -157,6 +159,7 @@ pub struct ParquetReader<'a> {
file_path: &'a str,
object_store: ObjectStore,
projected_schema: ProjectedSchemaRef,
+ predicate: Predicate,
}
type ReaderFactoryFuture<'a, R> =
@@ -167,11 +170,13 @@ impl<'a> ParquetReader<'a> {
file_path: &str,
object_store: ObjectStore,
projected_schema: ProjectedSchemaRef,
+ predicate: Predicate,
) -> ParquetReader {
ParquetReader {
file_path,
object_store,
projected_schema,
+ predicate,
}
}
@@ -191,19 +196,31 @@ impl<'a> ParquetReader<'a> {
let metadata = read_metadata_async(&mut reader)
.await
.context(error::ReadParquetSnafu { file: &file_path })?;
+
let arrow_schema =
infer_schema(&metadata).context(error::ReadParquetSnafu { file: &file_path })?;
+
// Now the StoreSchema is only used to validate metadata of the parquet file, but this schema
// would be useful once we support altering schema, as this is the actual schema of the SST.
- let _store_schema = StoreSchema::try_from(arrow_schema)
+ let store_schema = StoreSchema::try_from(arrow_schema)
.context(error::ConvertStoreSchemaSnafu { file: &file_path })?;
+ let pruned_row_groups = self
+ .predicate
+ .prune_row_groups(store_schema.schema().clone(), &metadata.row_groups);
+
let projected_fields = self.projected_fields().to_vec();
let chunk_stream = try_stream!({
- for rg in metadata.row_groups {
+ for (idx, valid) in pruned_row_groups.iter().enumerate() {
+ if !valid {
+ debug!("Pruned {} row groups", idx);
+ continue;
+ }
+
+ let rg = &metadata.row_groups[idx];
let column_chunks = read_columns_many_async(
&reader_factory,
- &rg,
+ rg,
projected_fields.clone(),
Some(chunk_size),
)
diff --git a/src/store-api/Cargo.toml b/src/store-api/Cargo.toml
index b07f45d259da..0258a660eb09 100644
--- a/src/store-api/Cargo.toml
+++ b/src/store-api/Cargo.toml
@@ -9,6 +9,7 @@ async-trait = "0.1"
bytes = "1.1"
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
+common-query = { path = "../common/query" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
derive_builder = "0.11"
diff --git a/src/store-api/src/storage/requests.rs b/src/store-api/src/storage/requests.rs
index e8f727b31b96..1309cc4baab7 100644
--- a/src/store-api/src/storage/requests.rs
+++ b/src/store-api/src/storage/requests.rs
@@ -1,6 +1,7 @@
use std::time::Duration;
use common_error::ext::ErrorExt;
+use common_query::logical_plan::Expr;
use common_time::RangeMillis;
use datatypes::vectors::VectorRef;
@@ -34,7 +35,7 @@ pub trait PutOperation: Send {
fn add_value_column(&mut self, name: &str, vector: VectorRef) -> Result<(), Self::Error>;
}
-#[derive(Debug, Default)]
+#[derive(Default)]
pub struct ScanRequest {
/// Max sequence number to read, None for latest sequence.
///
@@ -43,6 +44,8 @@ pub struct ScanRequest {
pub sequence: Option<SequenceNumber>,
/// Indices of columns to read, `None` to read all columns.
pub projection: Option<Vec<usize>>,
+ /// Filters pushed down
+ pub filters: Vec<Expr>,
}
#[derive(Debug)]
diff --git a/src/table-engine/src/table.rs b/src/table-engine/src/table.rs
index 5712d011dc84..d6190d26284c 100644
--- a/src/table-engine/src/table.rs
+++ b/src/table-engine/src/table.rs
@@ -24,7 +24,7 @@ use store_api::storage::{
WriteContext, WriteRequest,
};
use table::error::{Error as TableError, MissingColumnSnafu, Result as TableResult};
-use table::metadata::{TableInfoRef, TableMetaBuilder};
+use table::metadata::{FilterPushDownType, TableInfoRef, TableMetaBuilder};
use table::requests::{AlterKind, AlterTableRequest, InsertRequest};
use table::{
metadata::{TableInfo, TableType},
@@ -118,16 +118,17 @@ impl<R: Region> Table for MitoTable<R> {
async fn scan(
&self,
projection: &Option<Vec<usize>>,
- _filters: &[Expr],
+ filters: &[Expr],
_limit: Option<usize>,
) -> TableResult<SendableRecordBatchStream> {
let read_ctx = ReadContext::default();
let snapshot = self.region.snapshot(&read_ctx).map_err(TableError::new)?;
let projection = self.transform_projection(&self.region, projection.clone())?;
-
+ let filters = filters.into();
let scan_request = ScanRequest {
projection,
+ filters,
..Default::default()
};
let mut reader = snapshot
@@ -243,6 +244,10 @@ impl<R: Region> Table for MitoTable<R> {
// table cannot be hold.
Ok(())
}
+
+ fn supports_filter_pushdown(&self, _filter: &Expr) -> table::error::Result<FilterPushDownType> {
+ Ok(FilterPushDownType::Inexact)
+ }
}
fn build_table_schema_with_new_column(
diff --git a/src/table/Cargo.toml b/src/table/Cargo.toml
index c53720e37f11..21f1023d3ba4 100644
--- a/src/table/Cargo.toml
+++ b/src/table/Cargo.toml
@@ -9,11 +9,20 @@ chrono = { version = "0.4", features = ["serde"] }
common-error = { path = "../common/error" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
+common-telemetry = { path = "../common/telemetry" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = ["simd"] }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2" }
datatypes = { path = "../datatypes" }
derive_builder = "0.11"
futures = "0.3"
+parquet-format-async-temp = "0.2"
+paste = "1.0"
serde = "1.0.136"
snafu = { version = "0.7", features = ["backtraces"] }
store-api = { path = "../store-api" }
+
+[dev-dependencies]
+datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2" }
+tempdir = "0.3"
+tokio = { version = "1.18", features = ["full"] }
+tokio-util = { version = "0.7", features = ["compat"] }
diff --git a/src/table/src/lib.rs b/src/table/src/lib.rs
index 248e813a1217..f02821500d35 100644
--- a/src/table/src/lib.rs
+++ b/src/table/src/lib.rs
@@ -1,6 +1,7 @@
pub mod engine;
pub mod error;
pub mod metadata;
+pub mod predicate;
pub mod requests;
pub mod table;
diff --git a/src/table/src/predicate.rs b/src/table/src/predicate.rs
new file mode 100644
index 000000000000..69069cb83b61
--- /dev/null
+++ b/src/table/src/predicate.rs
@@ -0,0 +1,227 @@
+mod stats;
+
+use common_query::logical_plan::Expr;
+use common_telemetry::{error, warn};
+use datafusion::physical_optimizer::pruning::PruningPredicate;
+use datatypes::arrow::io::parquet::read::RowGroupMetaData;
+use datatypes::schema::SchemaRef;
+
+use crate::predicate::stats::RowGroupPruningStatistics;
+
+#[derive(Default, Clone)]
+pub struct Predicate {
+ exprs: Vec<Expr>,
+}
+
+impl Predicate {
+ pub fn new(exprs: Vec<Expr>) -> Self {
+ Self { exprs }
+ }
+
+ pub fn empty() -> Self {
+ Self { exprs: vec![] }
+ }
+
+ pub fn prune_row_groups(
+ &self,
+ schema: SchemaRef,
+ row_groups: &[RowGroupMetaData],
+ ) -> Vec<bool> {
+ let mut res = vec![true; row_groups.len()];
+ for expr in &self.exprs {
+ match PruningPredicate::try_new(expr.df_expr().clone(), schema.arrow_schema().clone()) {
+ Ok(p) => {
+ let stat = RowGroupPruningStatistics::new(row_groups, &schema);
+ match p.prune(&stat) {
+ Ok(r) => {
+ for (curr_val, res) in r.into_iter().zip(res.iter_mut()) {
+ *res &= curr_val
+ }
+ }
+ Err(e) => {
+ warn!("Failed to prune row groups, error: {:?}", e);
+ }
+ }
+ }
+ Err(e) => {
+ error!("Failed to create predicate for expr, error: {:?}", e);
+ }
+ }
+ }
+ res
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ pub use datafusion::parquet::schema::types::{BasicTypeInfo, PhysicalType};
+ use datafusion_common::Column;
+ use datafusion_expr::Expr;
+ use datafusion_expr::Literal;
+ use datafusion_expr::Operator;
+ use datatypes::arrow::array::{Int32Array, Utf8Array};
+ use datatypes::arrow::chunk::Chunk;
+ use datatypes::arrow::datatypes::{DataType, Field, Schema};
+ use datatypes::arrow::io::parquet::read::FileReader;
+ use datatypes::arrow::io::parquet::write::{
+ Compression, Encoding, FileSink, Version, WriteOptions,
+ };
+ use futures::SinkExt;
+ use tempdir::TempDir;
+ use tokio_util::compat::TokioAsyncReadCompatExt;
+
+ use super::*;
+
+ async fn gen_test_parquet_file(dir: &TempDir, cnt: usize) -> (String, Arc<Schema>) {
+ let path = dir
+ .path()
+ .join("test-prune.parquet")
+ .to_string_lossy()
+ .to_string();
+
+ let name_field = Field::new("name", DataType::Utf8, true);
+ let count_field = Field::new("cnt", DataType::Int32, true);
+
+ let schema = Schema::from(vec![name_field, count_field]);
+
+ // now all physical types use plain encoding; maybe let the caller choose the encoding for each type.
+ let encodings = vec![Encoding::Plain].repeat(schema.fields.len());
+
+ let writer = tokio::fs::OpenOptions::new()
+ .write(true)
+ .create(true)
+ .open(&path)
+ .await
+ .unwrap()
+ .compat();
+
+ let mut sink = FileSink::try_new(
+ writer,
+ schema.clone(),
+ encodings,
+ WriteOptions {
+ write_statistics: true,
+ compression: Compression::Gzip,
+ version: Version::V2,
+ },
+ )
+ .unwrap();
+
+ for i in (0..cnt).step_by(10) {
+ let name_array = Utf8Array::<i64>::from(
+ &(i..(i + 10).min(cnt))
+ .map(|i| Some(i.to_string()))
+ .collect::<Vec<_>>(),
+ );
+ let count_array = Int32Array::from(
+ &(i..(i + 10).min(cnt))
+ .map(|i| Some(i as i32))
+ .collect::<Vec<_>>(),
+ );
+
+ sink.send(Chunk::new(vec![
+ Arc::new(name_array),
+ Arc::new(count_array),
+ ]))
+ .await
+ .unwrap();
+ }
+ sink.close().await.unwrap();
+ (path, Arc::new(schema))
+ }
+
+ async fn assert_prune(array_cnt: usize, predicate: Predicate, expect: Vec<bool>) {
+ let dir = TempDir::new("prune_parquet").unwrap();
+ let (path, schema) = gen_test_parquet_file(&dir, array_cnt).await;
+ let file_reader =
+ FileReader::try_new(std::fs::File::open(path).unwrap(), None, None, None, None)
+ .unwrap();
+
+ let schema = Arc::new(datatypes::schema::Schema::try_from(schema).unwrap());
+
+ let vec = file_reader.metadata().row_groups.clone();
+ let res = predicate.prune_row_groups(schema, &vec);
+ assert_eq!(expect, res);
+ }
+
+ fn gen_predicate(max_val: i32, op: Operator) -> Predicate {
+ Predicate::new(vec![Expr::BinaryExpr {
+ left: Box::new(Expr::Column(Column::from_name("cnt".to_string()))),
+ op,
+ right: Box::new(max_val.lit()),
+ }
+ .into()])
+ }
+
+ #[tokio::test]
+ async fn test_prune_empty() {
+ assert_prune(3, Predicate::empty(), vec![true]).await;
+ }
+
+ #[tokio::test]
+ async fn test_prune_all_match() {
+ let p = gen_predicate(3, Operator::Gt);
+ assert_prune(2, p, vec![false]).await;
+ }
+
+ #[tokio::test]
+ async fn test_prune_gt() {
+ let p = gen_predicate(29, Operator::Gt);
+ assert_prune(
+ 100,
+ p,
+ vec![
+ false, false, false, true, true, true, true, true, true, true,
+ ],
+ )
+ .await;
+ }
+
+ #[tokio::test]
+ async fn test_prune_eq_expr() {
+ let p = gen_predicate(30, Operator::Eq);
+ assert_prune(40, p, vec![false, false, false, true]).await;
+ }
+
+ #[tokio::test]
+ async fn test_prune_neq_expr() {
+ let p = gen_predicate(30, Operator::NotEq);
+ assert_prune(40, p, vec![true, true, true, true]).await;
+ }
+
+ #[tokio::test]
+ async fn test_prune_gteq_expr() {
+ let p = gen_predicate(29, Operator::GtEq);
+ assert_prune(40, p, vec![false, false, true, true]).await;
+ }
+
+ #[tokio::test]
+ async fn test_prune_lt_expr() {
+ let p = gen_predicate(30, Operator::Lt);
+ assert_prune(40, p, vec![true, true, true, false]).await;
+ }
+
+ #[tokio::test]
+ async fn test_prune_lteq_expr() {
+ let p = gen_predicate(30, Operator::LtEq);
+ assert_prune(40, p, vec![true, true, true, true]).await;
+ }
+
+ #[tokio::test]
+ async fn test_prune_between_expr() {
+ let p = gen_predicate(30, Operator::LtEq);
+ assert_prune(40, p, vec![true, true, true, true]).await;
+ }
+
+ #[tokio::test]
+ async fn test_or() {
+ // cnt > 30 or cnt < 20
+ let e = Expr::Column(Column::from_name("cnt"))
+ .gt(30.lit())
+ .or(Expr::Column(Column::from_name("cnt")).lt(20.lit()));
+ let p = Predicate::new(vec![e.into()]);
+ assert_prune(40, p, vec![true, true, false, true]).await;
+ }
+}
diff --git a/src/table/src/predicate/stats.rs b/src/table/src/predicate/stats.rs
new file mode 100644
index 000000000000..b1e5d0d5a51c
--- /dev/null
+++ b/src/table/src/predicate/stats.rs
@@ -0,0 +1,142 @@
+use datafusion::parquet::metadata::RowGroupMetaData;
+use datafusion::parquet::statistics::{
+ BinaryStatistics, BooleanStatistics, FixedLenStatistics, PrimitiveStatistics,
+};
+use datafusion::physical_optimizer::pruning::PruningStatistics;
+use datafusion_common::{Column, ScalarValue};
+use datatypes::arrow::array::ArrayRef;
+use datatypes::arrow::datatypes::DataType;
+use datatypes::arrow::io::parquet::read::PhysicalType;
+use datatypes::prelude::Vector;
+use datatypes::vectors::Int64Vector;
+use paste::paste;
+
+pub struct RowGroupPruningStatistics<'a> {
+ pub meta_data: &'a [RowGroupMetaData],
+ pub schema: &'a datatypes::schema::SchemaRef,
+}
+
+impl<'a> RowGroupPruningStatistics<'a> {
+ pub fn new(
+ meta_data: &'a [RowGroupMetaData],
+ schema: &'a datatypes::schema::SchemaRef,
+ ) -> Self {
+ Self { meta_data, schema }
+ }
+
+ fn field_by_name(&self, name: &str) -> Option<(usize, &DataType)> {
+ let idx = self.schema.column_index_by_name(name)?;
+ let data_type = &self.schema.arrow_schema().fields.get(idx)?.data_type;
+ Some((idx, data_type))
+ }
+}
+
+macro_rules! impl_min_max_values {
+ ($self:ident, $col:ident, $min_max: ident) => {
+ paste! {
+ {
+ let (column_index, data_type) = $self.field_by_name(&$col.name)?;
+ let null_scalar: ScalarValue = data_type.try_into().ok()?;
+ let scalar_values: Vec<ScalarValue> = $self
+ .meta_data
+ .iter()
+ .flat_map(|meta| meta.column(column_index).statistics())
+ .map(|stats| {
+ let stats = stats.ok()?;
+ let res = match stats.physical_type() {
+ PhysicalType::Boolean => {
+ let $min_max = stats.as_any().downcast_ref::<BooleanStatistics>().unwrap().[<$min_max _value>];
+ Some(ScalarValue::Boolean($min_max))
+ }
+ PhysicalType::Int32 => {
+ let $min_max = stats
+ .as_any()
+ .downcast_ref::<PrimitiveStatistics<i32>>()
+ .unwrap()
+ .[<$min_max _value>];
+ Some(ScalarValue::Int32($min_max))
+ }
+ PhysicalType::Int64 => {
+ let $min_max = stats
+ .as_any()
+ .downcast_ref::<PrimitiveStatistics<i64>>()
+ .unwrap()
+ .[<$min_max _value>];
+ Some(ScalarValue::Int64($min_max))
+ }
+ PhysicalType::Int96 => {
+ // INT96 currently not supported
+ None
+ }
+ PhysicalType::Float => {
+ let $min_max = stats
+ .as_any()
+ .downcast_ref::<PrimitiveStatistics<f32>>()
+ .unwrap()
+ .[<$min_max _value>];
+ Some(ScalarValue::Float32($min_max))
+ }
+ PhysicalType::Double => {
+ let $min_max = stats
+ .as_any()
+ .downcast_ref::<PrimitiveStatistics<f64>>()
+ .unwrap()
+ .[<$min_max _value>];
+ Some(ScalarValue::Float64($min_max))
+ }
+ PhysicalType::ByteArray => {
+ let $min_max = stats
+ .as_any()
+ .downcast_ref::<BinaryStatistics>()
+ .unwrap()
+ .[<$min_max _value>]
+ .clone();
+ Some(ScalarValue::Binary($min_max))
+ }
+ PhysicalType::FixedLenByteArray(_) => {
+ let $min_max = stats
+ .as_any()
+ .downcast_ref::<FixedLenStatistics>()
+ .unwrap()
+ .[<$min_max _value>]
+ .clone();
+ Some(ScalarValue::Binary($min_max))
+ }
+ };
+
+ res
+ })
+ .map(|maybe_scalar| maybe_scalar.unwrap_or_else(|| null_scalar.clone()))
+ .collect::<Vec<_>>();
+ ScalarValue::iter_to_array(scalar_values).ok()
+ }
+ }
+ };
+}
+
+impl<'a> PruningStatistics for RowGroupPruningStatistics<'a> {
+ fn min_values(&self, column: &Column) -> Option<ArrayRef> {
+ impl_min_max_values!(self, column, min)
+ }
+
+ fn max_values(&self, column: &Column) -> Option<ArrayRef> {
+ impl_min_max_values!(self, column, max)
+ }
+
+ fn num_containers(&self) -> usize {
+ self.meta_data.len()
+ }
+
+ fn null_counts(&self, column: &Column) -> Option<ArrayRef> {
+ let (idx, _) = self.field_by_name(&column.name)?;
+ let mut values: Vec<Option<i64>> = Vec::with_capacity(self.meta_data.len());
+ for m in self.meta_data {
+ let col = m.column(idx);
+ let stat = col.statistics()?.ok()?;
+ let bs = stat.null_count();
+ values.push(bs);
+ }
+
+ Some(Int64Vector::from(values).to_arrow_array())
+ }
+}
diff --git a/src/table/src/table/adapter.rs b/src/table/src/table/adapter.rs
index 4d4c71b0f193..8f988b80e4f4 100644
--- a/src/table/src/table/adapter.rs
+++ b/src/table/src/table/adapter.rs
@@ -1,7 +1,7 @@
+use core::fmt::Formatter;
use core::pin::Pin;
use core::task::{Context, Poll};
use std::any::Any;
-use std::fmt;
use std::fmt::Debug;
use std::mem;
use std::sync::{Arc, Mutex};
@@ -9,6 +9,7 @@ use std::sync::{Arc, Mutex};
use common_query::logical_plan::Expr;
use common_recordbatch::error::Result as RecordBatchResult;
use common_recordbatch::{RecordBatch, RecordBatchStream, SendableRecordBatchStream};
+use common_telemetry::debug;
use datafusion::arrow::datatypes::SchemaRef as DfSchemaRef;
/// Datafusion table adapters
use datafusion::datasource::{
@@ -40,9 +41,10 @@ struct ExecutionPlanAdapter {
}
impl Debug for ExecutionPlanAdapter {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- //TODO(dennis) better debug info
- write!(f, "ExecutionPlan(PlaceHolder)")
+ fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
+ f.debug_struct("ExecutionPlanAdapter")
+ .field("schema", &self.schema)
+ .finish()
}
}
@@ -202,7 +204,7 @@ impl Table for TableAdapter {
limit: Option<usize>,
) -> Result<SendableRecordBatchStream> {
let filters: Vec<DfExpr> = filters.iter().map(|e| e.df_expr().clone()).collect();
-
+ debug!("TableScan filter size: {}", filters.len());
let execution_plan = self
.table_provider
.scan(projection, &filters, limit)
|
feat
|
impl filter push down to parquet reader (#262)
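`Predicate::prune_row_groups` above hands the heavy lifting to DataFusion's `PruningPredicate`, but the underlying idea is plain min/max pruning: a row group whose statistics cannot possibly satisfy the filter is skipped. A toy version with hand-rolled statistics (`GroupStats` and the two filters are illustrative, not the real reader API):

// Per-row-group statistics for a single integer column, mirroring what the
// parquet footer stores for each row group.
struct GroupStats {
    min_cnt: i32,
    max_cnt: i32,
}

/// One bool per row group for a `cnt > threshold` filter: `true` means the
/// group may contain matches and must be read, `false` means it can be skipped.
fn prune_gt(groups: &[GroupStats], threshold: i32) -> Vec<bool> {
    groups.iter().map(|g| g.max_cnt > threshold).collect()
}

/// Same, for a `cnt < threshold` filter, using the per-group minimum.
fn prune_lt(groups: &[GroupStats], threshold: i32) -> Vec<bool> {
    groups.iter().map(|g| g.min_cnt < threshold).collect()
}

fn main() {
    // Three row groups holding 0..10, 10..20 and 20..30 respectively,
    // like the ten-row chunks written by `gen_test_parquet_file`.
    let groups = [
        GroupStats { min_cnt: 0, max_cnt: 9 },
        GroupStats { min_cnt: 10, max_cnt: 19 },
        GroupStats { min_cnt: 20, max_cnt: 29 },
    ];
    // Only the last group can hold rows with cnt > 25 ...
    assert_eq!(prune_gt(&groups, 25), vec![false, false, true]);
    // ... and only the first one can hold rows with cnt < 5.
    assert_eq!(prune_lt(&groups, 5), vec![true, false, false]);
}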
|
fcacb100a2ed4994bee2f8390c06367a309a303f
|
2024-01-08 12:02:01
|
LFC
|
chore: expose some codes to let other projects use them (#3115)
| false
|
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index a61869b163a7..6f0d62ef3598 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -464,7 +464,7 @@ impl StartCommand {
Ok(ddl_task_executor)
}
- async fn create_table_metadata_manager(
+ pub async fn create_table_metadata_manager(
kv_backend: KvBackendRef,
) -> Result<TableMetadataManagerRef> {
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend));
diff --git a/src/common/meta/src/ddl/table_meta.rs b/src/common/meta/src/ddl/table_meta.rs
index fd68fa4735b8..0b7ba059adc7 100644
--- a/src/common/meta/src/ddl/table_meta.rs
+++ b/src/common/meta/src/ddl/table_meta.rs
@@ -33,6 +33,7 @@ use crate::rpc::router::{Region, RegionRoute};
use crate::sequence::SequenceRef;
use crate::wal::{allocate_region_wal_options, WalOptionsAllocatorRef};
+#[derive(Clone)]
pub struct TableMetadataAllocator {
table_id_sequence: SequenceRef,
wal_options_allocator: WalOptionsAllocatorRef,
|
chore
|
expose some codes to let other projects use them (#3115)
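Deriving `Clone` on `TableMetadataAllocator` is cheap because its fields are `Ref`-suffixed `Arc` handles, so a clone shares the same sequence and WAL-options allocator rather than copying them. A toy illustration of that design choice (the `Sequence` type here is a placeholder, not the real `SequenceRef`):

use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;

// Placeholder for a shared, monotonically increasing table-id source.
struct Sequence {
    next: AtomicU32,
}

impl Sequence {
    fn next_id(&self) -> u32 {
        self.next.fetch_add(1, Ordering::Relaxed)
    }
}

// With every field behind an `Arc`, `#[derive(Clone)]` only bumps reference
// counts; all clones allocate ids from the same underlying sequence.
#[derive(Clone)]
struct Allocator {
    sequence: Arc<Sequence>,
}

fn main() {
    let a = Allocator {
        sequence: Arc::new(Sequence { next: AtomicU32::new(1024) }),
    };
    let b = a.clone();
    assert_eq!(a.sequence.next_id(), 1024);
    assert_eq!(b.sequence.next_id(), 1025); // shared state, not an independent copy
}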
|
4c07606da66cb60239e49f799ebc8a4e008bbd96
|
2024-02-21 15:21:10
|
tison
|
refactor: put together HTTP headers (#3337)
| false
|
diff --git a/src/common/error/src/lib.rs b/src/common/error/src/lib.rs
index aa3c915e84e3..aa54ef39e78f 100644
--- a/src/common/error/src/lib.rs
+++ b/src/common/error/src/lib.rs
@@ -19,7 +19,9 @@ pub mod format;
pub mod mock;
pub mod status_code;
+pub use snafu;
+
+// HACK - these headers are here for shared in gRPC services. For common HTTP headers,
+// please define in `src/servers/src/http/header.rs`.
pub const GREPTIME_DB_HEADER_ERROR_CODE: &str = "x-greptime-err-code";
pub const GREPTIME_DB_HEADER_ERROR_MSG: &str = "x-greptime-err-msg";
-
-pub use snafu;
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index e4b25adcb305..4ebbdc55445c 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -596,9 +596,11 @@ macro_rules! define_into_tonic_status {
($Error: ty) => {
impl From<$Error> for tonic::Status {
fn from(err: $Error) -> Self {
- use common_error::{GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG};
use tonic::codegen::http::{HeaderMap, HeaderValue};
use tonic::metadata::MetadataMap;
+ use $crate::http::header::constants::{
+ GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG,
+ };
let mut headers = HeaderMap::<HeaderValue>::with_capacity(2);
diff --git a/src/servers/src/http/arrow_result.rs b/src/servers/src/http/arrow_result.rs
index b47912b1617e..3daad34f1d00 100644
--- a/src/servers/src/http/arrow_result.rs
+++ b/src/servers/src/http/arrow_result.rs
@@ -17,7 +17,7 @@ use std::sync::Arc;
use arrow::datatypes::Schema;
use arrow_ipc::writer::FileWriter;
-use axum::http::{header, HeaderName, HeaderValue};
+use axum::http::{header, HeaderValue};
use axum::response::{IntoResponse, Response};
use common_error::status_code::StatusCode;
use common_query::Output;
@@ -122,15 +122,15 @@ impl IntoResponse for ArrowResponse {
(
[
(
- header::CONTENT_TYPE,
+ &header::CONTENT_TYPE,
HeaderValue::from_static("application/arrow"),
),
(
- HeaderName::from_static(GREPTIME_DB_HEADER_FORMAT),
+ &GREPTIME_DB_HEADER_FORMAT,
HeaderValue::from_static("ARROW"),
),
(
- HeaderName::from_static(GREPTIME_DB_HEADER_EXECUTION_TIME),
+ &GREPTIME_DB_HEADER_EXECUTION_TIME,
HeaderValue::from(execution_time),
),
],
diff --git a/src/servers/src/http/csv_result.rs b/src/servers/src/http/csv_result.rs
index 30c5d4a0264d..f0d377b01e54 100644
--- a/src/servers/src/http/csv_result.rs
+++ b/src/servers/src/http/csv_result.rs
@@ -101,9 +101,9 @@ impl IntoResponse for CsvResponse {
)
.into_response();
resp.headers_mut()
- .insert(GREPTIME_DB_HEADER_FORMAT, HeaderValue::from_static("CSV"));
+ .insert(&GREPTIME_DB_HEADER_FORMAT, HeaderValue::from_static("CSV"));
resp.headers_mut().insert(
- GREPTIME_DB_HEADER_EXECUTION_TIME,
+ &GREPTIME_DB_HEADER_EXECUTION_TIME,
HeaderValue::from(execution_time),
);
resp
diff --git a/src/servers/src/http/error_result.rs b/src/servers/src/http/error_result.rs
index 629594e66456..57a4bd698105 100644
--- a/src/servers/src/http/error_result.rs
+++ b/src/servers/src/http/error_result.rs
@@ -17,11 +17,11 @@ use axum::response::{IntoResponse, Response};
use axum::Json;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
-use common_error::{GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG};
use common_telemetry::logging::{debug, error};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
+use crate::http::header::constants::{GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG};
use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT};
use crate::http::ResponseFormat;
@@ -88,9 +88,9 @@ impl IntoResponse for ErrorResponse {
HeaderValue::from_str(&msg).expect("malformed error msg"),
);
resp.headers_mut()
- .insert(GREPTIME_DB_HEADER_FORMAT, HeaderValue::from_static(ty));
+ .insert(&GREPTIME_DB_HEADER_FORMAT, HeaderValue::from_static(ty));
resp.headers_mut().insert(
- GREPTIME_DB_HEADER_EXECUTION_TIME,
+ &GREPTIME_DB_HEADER_EXECUTION_TIME,
HeaderValue::from(execution_time),
);
resp
diff --git a/src/servers/src/http/greptime_result_v1.rs b/src/servers/src/http/greptime_result_v1.rs
index 596f1bcfdd8f..1efeeccda25c 100644
--- a/src/servers/src/http/greptime_result_v1.rs
+++ b/src/servers/src/http/greptime_result_v1.rs
@@ -76,15 +76,15 @@ impl IntoResponse for GreptimedbV1Response {
let mut resp = Json(self).into_response();
resp.headers_mut().insert(
- GREPTIME_DB_HEADER_FORMAT,
+ &GREPTIME_DB_HEADER_FORMAT,
HeaderValue::from_static("greptimedb_v1"),
);
resp.headers_mut().insert(
- GREPTIME_DB_HEADER_EXECUTION_TIME,
+ &GREPTIME_DB_HEADER_EXECUTION_TIME,
HeaderValue::from(execution_time),
);
if let Some(m) = metrics.and_then(|m| HeaderValue::from_str(&m).ok()) {
- resp.headers_mut().insert(GREPTIME_DB_HEADER_METRICS, m);
+ resp.headers_mut().insert(&GREPTIME_DB_HEADER_METRICS, m);
}
resp
diff --git a/src/servers/src/http/header.rs b/src/servers/src/http/header.rs
index aa0970dbdb3c..fd5dc8c43038 100644
--- a/src/servers/src/http/header.rs
+++ b/src/servers/src/http/header.rs
@@ -14,16 +14,45 @@
use headers::{Header, HeaderName, HeaderValue};
-pub const GREPTIME_DB_HEADER_FORMAT: &str = "x-greptime-format";
-pub const GREPTIME_DB_HEADER_EXECUTION_TIME: &str = "x-greptime-execution-time";
-pub const GREPTIME_DB_HEADER_METRICS: &str = "x-greptime-metrics";
+pub mod constants {
+ // New HTTP headers would better distinguish use cases among:
+ // * GreptimeDB
+ // * GreptimeCloud
+ // * ...
+ //
+ // And thus trying to use:
+ // * x-greptime-db-xxx
+ // * x-greptime-cloud-xxx
+ //
+ // ... accordingly
+ //
+ // Most of the headers are for GreptimeDB and thus using `x-greptime-db-` as prefix.
+ // Only use `x-greptime-cloud` when it's intentionally used by GreptimeCloud.
+
+ // LEGACY HEADERS - KEEP IT UNMODIFIED
+ pub const GREPTIME_DB_HEADER_FORMAT: &str = "x-greptime-format";
+ pub const GREPTIME_DB_HEADER_EXECUTION_TIME: &str = "x-greptime-execution-time";
+ pub const GREPTIME_DB_HEADER_METRICS: &str = "x-greptime-metrics";
+ pub const GREPTIME_DB_HEADER_NAME: &str = "x-greptime-db-name";
+ pub const GREPTIME_TIMEZONE_HEADER_NAME: &str = "x-greptime-timezone";
+ pub const GREPTIME_DB_HEADER_ERROR_CODE: &str = common_error::GREPTIME_DB_HEADER_ERROR_CODE;
+ pub const GREPTIME_DB_HEADER_ERROR_MSG: &str = common_error::GREPTIME_DB_HEADER_ERROR_MSG;
+}
+
+pub static GREPTIME_DB_HEADER_FORMAT: HeaderName =
+ HeaderName::from_static(constants::GREPTIME_DB_HEADER_FORMAT);
+pub static GREPTIME_DB_HEADER_EXECUTION_TIME: HeaderName =
+ HeaderName::from_static(constants::GREPTIME_DB_HEADER_EXECUTION_TIME);
+pub static GREPTIME_DB_HEADER_METRICS: HeaderName =
+ HeaderName::from_static(constants::GREPTIME_DB_HEADER_METRICS);
/// Header key of `db-name`. Example format of the header value is `greptime-public`.
-pub static GREPTIME_DB_HEADER_NAME: HeaderName = HeaderName::from_static("x-greptime-db-name");
-/// Header key of query specific timezone.
-/// Example format of the header value is `Asia/Shanghai` or `+08:00`.
+pub static GREPTIME_DB_HEADER_NAME: HeaderName =
+ HeaderName::from_static(constants::GREPTIME_DB_HEADER_NAME);
+
+/// Header key of query specific timezone. Example format of the header value is `Asia/Shanghai` or `+08:00`.
pub static GREPTIME_TIMEZONE_HEADER_NAME: HeaderName =
- HeaderName::from_static("x-greptime-timezone");
+ HeaderName::from_static(constants::GREPTIME_TIMEZONE_HEADER_NAME);
pub struct GreptimeDbName(Option<String>);
diff --git a/src/servers/src/http/influxdb_result_v1.rs b/src/servers/src/http/influxdb_result_v1.rs
index a4e8206058df..05525ea128a6 100644
--- a/src/servers/src/http/influxdb_result_v1.rs
+++ b/src/servers/src/http/influxdb_result_v1.rs
@@ -217,11 +217,11 @@ impl IntoResponse for InfluxdbV1Response {
let execution_time = self.execution_time_ms;
let mut resp = Json(self).into_response();
resp.headers_mut().insert(
- GREPTIME_DB_HEADER_FORMAT,
+ &GREPTIME_DB_HEADER_FORMAT,
HeaderValue::from_static("influxdb_v1"),
);
resp.headers_mut().insert(
- GREPTIME_DB_HEADER_EXECUTION_TIME,
+ &GREPTIME_DB_HEADER_EXECUTION_TIME,
HeaderValue::from(execution_time),
);
resp
diff --git a/src/servers/src/http/prometheus_resp.rs b/src/servers/src/http/prometheus_resp.rs
index 3deb0b109143..e7a310faf5b0 100644
--- a/src/servers/src/http/prometheus_resp.rs
+++ b/src/servers/src/http/prometheus_resp.rs
@@ -66,7 +66,7 @@ impl IntoResponse for PrometheusJsonResponse {
let mut resp = Json(self).into_response();
if let Some(m) = metrics.and_then(|m| HeaderValue::from_str(&m).ok()) {
- resp.headers_mut().insert(GREPTIME_DB_HEADER_METRICS, m);
+ resp.headers_mut().insert(&GREPTIME_DB_HEADER_METRICS, m);
}
resp
diff --git a/src/servers/tests/http/influxdb_test.rs b/src/servers/tests/http/influxdb_test.rs
index 81f17181093b..9b68802bb4c6 100644
--- a/src/servers/tests/http/influxdb_test.rs
+++ b/src/servers/tests/http/influxdb_test.rs
@@ -169,7 +169,7 @@ async fn test_influxdb_write() {
.await;
assert_eq!(result.status(), 401);
assert_eq!(
- result.headers().get(GREPTIME_DB_HEADER_FORMAT).unwrap(),
+ result.headers().get(&GREPTIME_DB_HEADER_FORMAT).unwrap(),
"influxdb_v1",
);
assert_eq!(
@@ -185,7 +185,7 @@ async fn test_influxdb_write() {
.await;
assert_eq!(result.status(), 401);
assert_eq!(
- result.headers().get(GREPTIME_DB_HEADER_FORMAT).unwrap(),
+ result.headers().get(&GREPTIME_DB_HEADER_FORMAT).unwrap(),
"influxdb_v1",
);
assert_eq!(
|
refactor
|
put together HTTP headers (#3337)
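The patch above replaces string header constants with shared `HeaderName` values and inserts them into responses by reference. A minimal standalone sketch of that pattern, assuming the `http` crate and a hypothetical `x-example-format` header (names here are illustrative only, not part of the patch):

```rust
use http::header::{HeaderMap, HeaderName, HeaderValue};

// Hypothetical header name mirroring the constants above.
// `HeaderName::from_static` requires an all-lowercase name.
const X_EXAMPLE_FORMAT: &str = "x-example-format";

fn tag_response(headers: &mut HeaderMap) {
    let name = HeaderName::from_static(X_EXAMPLE_FORMAT);
    // `HeaderMap::insert` accepts `&HeaderName` via `IntoHeaderName`,
    // which is why the patch inserts `&GREPTIME_DB_HEADER_FORMAT`
    // rather than the old `&str` constant.
    headers.insert(&name, HeaderValue::from_static("csv"));
}

fn main() {
    let mut headers = HeaderMap::new();
    tag_response(&mut headers);
    assert_eq!(headers.get(X_EXAMPLE_FORMAT).unwrap(), "csv");
}
```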
|
4920836021599849eb1446ad189fc36b60e179e7
|
2023-05-17 12:07:08
|
zyy17
|
refactor: support parsing env list (#1595)
| false
|
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index ffdc971e9d96..b6be54f670b1 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -86,8 +86,8 @@ struct StartCommand {
rpc_hostname: Option<String>,
#[clap(long)]
mysql_addr: Option<String>,
- #[clap(long)]
- metasrv_addr: Option<String>,
+ #[clap(long, multiple = true, value_delimiter = ',')]
+ metasrv_addr: Option<Vec<String>>,
#[clap(short, long)]
config_file: Option<String>,
#[clap(long)]
@@ -104,8 +104,11 @@ struct StartCommand {
impl StartCommand {
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
- let mut opts: DatanodeOptions =
- Options::load_layered_options(self.config_file.as_deref(), self.env_prefix.as_ref())?;
+ let mut opts: DatanodeOptions = Options::load_layered_options(
+ self.config_file.as_deref(),
+ self.env_prefix.as_ref(),
+ DatanodeOptions::env_list_keys(),
+ )?;
if let Some(dir) = top_level_opts.log_dir {
opts.logging.dir = dir;
@@ -130,15 +133,10 @@ impl StartCommand {
opts.node_id = Some(node_id);
}
- if let Some(meta_addr) = &self.metasrv_addr {
+ if let Some(metasrv_addrs) = &self.metasrv_addr {
opts.meta_client_options
.get_or_insert_with(MetaClientOptions::default)
- .metasrv_addrs = meta_addr
- .clone()
- .split(',')
- .map(&str::trim)
- .map(&str::to_string)
- .collect::<_>();
+ .metasrv_addrs = metasrv_addrs.clone();
opts.mode = Mode::Distributed;
}
@@ -316,7 +314,7 @@ mod tests {
if let Options::Datanode(opt) = (StartCommand {
node_id: Some(42),
- metasrv_addr: Some("127.0.0.1:3002".to_string()),
+ metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
..Default::default()
})
.load_options(TopLevelOptions::default())
@@ -326,7 +324,7 @@ mod tests {
}
assert!((StartCommand {
- metasrv_addr: Some("127.0.0.1:3002".to_string()),
+ metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
..Default::default()
})
.load_options(TopLevelOptions::default())
@@ -371,7 +369,6 @@ mod tests {
mysql_runtime_size = 2
[meta_client_options]
- metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = true
@@ -427,6 +424,16 @@ mod tests {
.join(ENV_VAR_SEP),
Some("99"),
),
+ (
+ // meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
+ vec![
+ env_prefix.to_string(),
+ "meta_client_options".to_uppercase(),
+ "metasrv_addrs".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003"),
+ ),
],
|| {
let command = StartCommand {
@@ -444,6 +451,14 @@ mod tests {
opts.storage.manifest.gc_duration,
Some(Duration::from_secs(9))
);
+ assert_eq!(
+ opts.meta_client_options.unwrap().metasrv_addrs,
+ vec![
+ "127.0.0.1:3001".to_string(),
+ "127.0.0.1:3002".to_string(),
+ "127.0.0.1:3003".to_string()
+ ]
+ );
// Should be read from config file, config file > env > default values.
assert_eq!(opts.storage.compaction.max_purge_tasks, 32);
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index a082a2ab1fea..ba57a99a5428 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -102,8 +102,8 @@ pub struct StartCommand {
config_file: Option<String>,
#[clap(short, long)]
influxdb_enable: Option<bool>,
- #[clap(long)]
- metasrv_addr: Option<String>,
+ #[clap(long, multiple = true, value_delimiter = ',')]
+ metasrv_addr: Option<Vec<String>>,
#[clap(long)]
tls_mode: Option<TlsMode>,
#[clap(long)]
@@ -120,8 +120,11 @@ pub struct StartCommand {
impl StartCommand {
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
- let mut opts: FrontendOptions =
- Options::load_layered_options(self.config_file.as_deref(), self.env_prefix.as_ref())?;
+ let mut opts: FrontendOptions = Options::load_layered_options(
+ self.config_file.as_deref(),
+ self.env_prefix.as_ref(),
+ FrontendOptions::env_list_keys(),
+ )?;
if let Some(dir) = top_level_opts.log_dir {
opts.logging.dir = dir;
@@ -182,15 +185,10 @@ impl StartCommand {
opts.influxdb_options = Some(InfluxdbOptions { enable });
}
- if let Some(metasrv_addr) = &self.metasrv_addr {
+ if let Some(metasrv_addrs) = &self.metasrv_addr {
opts.meta_client_options
.get_or_insert_with(MetaClientOptions::default)
- .metasrv_addrs = metasrv_addr
- .clone()
- .split(',')
- .map(&str::trim)
- .map(&str::to_string)
- .collect::<Vec<_>>();
+ .metasrv_addrs = metasrv_addrs.clone();
opts.mode = Mode::Distributed;
}
@@ -374,6 +372,11 @@ mod tests {
[http_options]
addr = "127.0.0.1:4000"
+ [meta_client_options]
+ timeout_millis = 3000
+ connect_timeout_millis = 5000
+ tcp_nodelay = true
+
[mysql_options]
addr = "127.0.0.1:4002"
"#;
@@ -412,6 +415,16 @@ mod tests {
.join(ENV_VAR_SEP),
Some("127.0.0.1:24000"),
),
+ (
+ // meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
+ vec![
+ env_prefix.to_string(),
+ "meta_client_options".to_uppercase(),
+ "metasrv_addrs".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003"),
+ ),
],
|| {
let command = StartCommand {
@@ -430,6 +443,14 @@ mod tests {
// Should be read from env, env > default values.
assert_eq!(fe_opts.mysql_options.as_ref().unwrap().runtime_size, 11);
+ assert_eq!(
+ fe_opts.meta_client_options.unwrap().metasrv_addrs,
+ vec![
+ "127.0.0.1:3001".to_string(),
+ "127.0.0.1:3002".to_string(),
+ "127.0.0.1:3003".to_string()
+ ]
+ );
// Should be read from config file, config file > env > default values.
assert_eq!(
diff --git a/src/cmd/src/metasrv.rs b/src/cmd/src/metasrv.rs
index 59edf69b1a69..ba0c9a6c1295 100644
--- a/src/cmd/src/metasrv.rs
+++ b/src/cmd/src/metasrv.rs
@@ -102,8 +102,11 @@ struct StartCommand {
impl StartCommand {
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
- let mut opts: MetaSrvOptions =
- Options::load_layered_options(self.config_file.as_deref(), self.env_prefix.as_ref())?;
+ let mut opts: MetaSrvOptions = Options::load_layered_options(
+ self.config_file.as_deref(),
+ self.env_prefix.as_ref(),
+ None,
+ )?;
if let Some(dir) = top_level_opts.log_dir {
opts.logging.dir = dir;
diff --git a/src/cmd/src/options.rs b/src/cmd/src/options.rs
index 595cf099b934..6a92c1521075 100644
--- a/src/cmd/src/options.rs
+++ b/src/cmd/src/options.rs
@@ -23,6 +23,7 @@ use snafu::ResultExt;
use crate::error::{LoadLayeredConfigSnafu, Result};
pub const ENV_VAR_SEP: &str = "__";
+pub const ENV_LIST_SEP: &str = ",";
pub struct MixOptions {
pub fe_opts: FrontendOptions,
@@ -60,9 +61,12 @@ impl Options {
/// `env_prefix` is the prefix of environment variables, e.g. "FRONTEND__xxx".
/// The function will use dunder(double underscore) `__` as the separator for environment variables, for example:
/// `DATANODE__STORAGE__MANIFEST__CHECKPOINT_MARGIN` will be mapped to `DatanodeOptions.storage.manifest.checkpoint_margin` field in the configuration.
+ /// `list_keys` is the list of keys that should be parsed as a list, for example, you can pass `Some(&["meta_client_options.metasrv_addrs"]` to parse `GREPTIMEDB_METASRV__META_CLIENT_OPTIONS__METASRV_ADDRS` as a list.
+ /// The function will use comma `,` as the separator for list values, for example: `127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003`.
pub fn load_layered_options<'de, T: Serialize + Deserialize<'de> + Default>(
config_file: Option<&str>,
env_prefix: &str,
+ list_keys: Option<&[&str]>,
) -> Result<T> {
let default_opts = T::default();
@@ -73,6 +77,13 @@ impl Options {
env = env.prefix(env_prefix);
}
+ if let Some(list_keys) = list_keys {
+ env = env.list_separator(ENV_LIST_SEP);
+ for key in list_keys {
+ env = env.with_list_parse_key(key);
+ }
+ }
+
env.try_parsing(true)
.separator(ENV_VAR_SEP)
.ignore_empty(true)
@@ -121,7 +132,6 @@ mod tests {
mysql_runtime_size = 2
[meta_client_options]
- metasrv_addrs = ["127.0.0.1:3002"]
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = true
@@ -212,11 +222,24 @@ mod tests {
.join(ENV_VAR_SEP),
Some("/other/wal/dir"),
),
+ (
+ // meta_client_options.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
+ vec![
+ env_prefix.to_string(),
+ "meta_client_options".to_uppercase(),
+ "metasrv_addrs".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003"),
+ ),
],
|| {
- let opts: DatanodeOptions =
- Options::load_layered_options(Some(file.path().to_str().unwrap()), env_prefix)
- .unwrap();
+ let opts: DatanodeOptions = Options::load_layered_options(
+ Some(file.path().to_str().unwrap()),
+ env_prefix,
+ DatanodeOptions::env_list_keys(),
+ )
+ .unwrap();
// Check the configs from environment variables.
assert_eq!(opts.storage.manifest.checkpoint_margin, Some(99));
@@ -231,6 +254,14 @@ mod tests {
Some(Duration::from_secs(42))
);
assert!(opts.storage.manifest.checkpoint_on_startup);
+ assert_eq!(
+ opts.meta_client_options.unwrap().metasrv_addrs,
+ vec![
+ "127.0.0.1:3001".to_string(),
+ "127.0.0.1:3002".to_string(),
+ "127.0.0.1:3003".to_string()
+ ]
+ );
// Should be the values from config file, not environment variables.
assert_eq!(opts.wal.dir, "/tmp/greptimedb/wal".to_string());
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 7016e0c5ad8a..f53c11ede372 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -217,8 +217,11 @@ struct StartCommand {
impl StartCommand {
fn load_options(&self, top_level_options: TopLevelOptions) -> Result<Options> {
- let mut opts: StandaloneOptions =
- Options::load_layered_options(self.config_file.as_deref(), self.env_prefix.as_ref())?;
+ let mut opts: StandaloneOptions = Options::load_layered_options(
+ self.config_file.as_deref(),
+ self.env_prefix.as_ref(),
+ None,
+ )?;
opts.enable_memory_catalog = self.enable_memory_catalog;
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 242ed5c48f32..b9d706c950d7 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -323,6 +323,12 @@ impl Default for DatanodeOptions {
}
}
+impl DatanodeOptions {
+ pub fn env_list_keys() -> Option<&'static [&'static str]> {
+ Some(&["meta_client_options.metasrv_addrs"])
+ }
+}
+
/// Datanode service.
pub struct Datanode {
opts: DatanodeOptions,
diff --git a/src/frontend/src/frontend.rs b/src/frontend/src/frontend.rs
index 574bf7727d00..59c6a997286a 100644
--- a/src/frontend/src/frontend.rs
+++ b/src/frontend/src/frontend.rs
@@ -60,6 +60,12 @@ impl Default for FrontendOptions {
}
}
+impl FrontendOptions {
+ pub fn env_list_keys() -> Option<&'static [&'static str]> {
+ Some(&["meta_client_options.metasrv_addrs"])
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
|
refactor
|
support parsing env list (#1595)
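The commit above hands comma-separated parsing to clap (`value_delimiter = ','`) and to the `config` crate's `list_separator`/`with_list_parse_key`. As a standalone illustration of the convention only, not the actual loader, here is a hypothetical helper that splits such an environment value into a list:

```rust
// Splits a comma-separated environment value such as
// GREPTIMEDB_DATANODE__META_CLIENT_OPTIONS__METASRV_ADDRS=127.0.0.1:3001,127.0.0.1:3002
// into trimmed, non-empty addresses. The real loader delegates this to
// `config::Environment`, as shown in the diff above.
fn parse_env_list(raw: &str) -> Vec<String> {
    raw.split(',')
        .map(str::trim)
        .filter(|s| !s.is_empty())
        .map(str::to_string)
        .collect()
}

fn main() {
    let addrs = parse_env_list("127.0.0.1:3001, 127.0.0.1:3002,127.0.0.1:3003");
    assert_eq!(
        addrs,
        vec!["127.0.0.1:3001", "127.0.0.1:3002", "127.0.0.1:3003"]
    );
}
```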
|
a2f9b788f1e52601ca784a680dfb4d004042c4da
|
2022-11-08 13:48:13
|
Lei, Huang
|
fix: datanode start in standalone mode by default (#418)
| false
|
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index 18cba573b50b..aa4b47d18760 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -3,7 +3,7 @@ http_addr = '0.0.0.0:3000'
rpc_addr = '0.0.0.0:3001'
wal_dir = '/tmp/greptimedb/wal'
rpc_runtime_size = 8
-
+mode = "standalone"
mysql_addr = '0.0.0.0:3306'
mysql_runtime_size = 4
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index 7c0919c81a4f..4b99482542f9 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -1,9 +1,9 @@
use clap::Parser;
use common_telemetry::logging;
-use datanode::datanode::{Datanode, DatanodeOptions};
+use datanode::datanode::{Datanode, DatanodeOptions, Mode};
use snafu::ResultExt;
-use crate::error::{Error, Result, StartDatanodeSnafu};
+use crate::error::{Error, MissingConfigSnafu, Result, StartDatanodeSnafu};
use crate::toml_loader;
#[derive(Parser)]
@@ -75,9 +75,6 @@ impl TryFrom<StartCommand> for DatanodeOptions {
DatanodeOptions::default()
};
- if let Some(node_id) = cmd.node_id {
- opts.node_id = node_id;
- }
if let Some(addr) = cmd.http_addr {
opts.http_addr = addr;
}
@@ -90,8 +87,31 @@ impl TryFrom<StartCommand> for DatanodeOptions {
if let Some(addr) = cmd.postgres_addr {
opts.postgres_addr = addr;
}
- if let Some(addr) = cmd.metasrv_addr {
- opts.meta_client_opts.metasrv_addr = addr;
+
+ match (cmd.metasrv_addr, cmd.node_id) {
+ (Some(meta_addr), Some(node_id)) => {
+ // Running mode is only set to Distributed when
+ // both metasrv addr and node id are set in
+ // commandline options
+ opts.meta_client_opts.metasrv_addr = meta_addr;
+ opts.node_id = node_id;
+ opts.mode = Mode::Distributed;
+ }
+ (None, None) => {
+ opts.mode = Mode::Standalone;
+ }
+ (None, Some(_)) => {
+ return MissingConfigSnafu {
+ msg: "Missing metasrv address option",
+ }
+ .fail();
+ }
+ (Some(_), None) => {
+ return MissingConfigSnafu {
+ msg: "Missing node id option",
+ }
+ .fail();
+ }
}
Ok(opts)
}
@@ -140,4 +160,58 @@ mod tests {
}
};
}
+
+ #[test]
+ fn test_try_from_cmd() {
+ assert_eq!(
+ Mode::Standalone,
+ DatanodeOptions::try_from(StartCommand {
+ node_id: None,
+ http_addr: None,
+ rpc_addr: None,
+ mysql_addr: None,
+ postgres_addr: None,
+ metasrv_addr: None,
+ config_file: None
+ })
+ .unwrap()
+ .mode
+ );
+
+ assert_eq!(
+ Mode::Distributed,
+ DatanodeOptions::try_from(StartCommand {
+ node_id: Some(42),
+ http_addr: None,
+ rpc_addr: None,
+ mysql_addr: None,
+ postgres_addr: None,
+ metasrv_addr: Some("127.0.0.1:3002".to_string()),
+ config_file: None
+ })
+ .unwrap()
+ .mode
+ );
+
+ assert!(DatanodeOptions::try_from(StartCommand {
+ node_id: None,
+ http_addr: None,
+ rpc_addr: None,
+ mysql_addr: None,
+ postgres_addr: None,
+ metasrv_addr: Some("127.0.0.1:3002".to_string()),
+ config_file: None,
+ })
+ .is_err());
+ assert!(DatanodeOptions::try_from(StartCommand {
+ node_id: Some(42),
+ http_addr: None,
+ rpc_addr: None,
+ mysql_addr: None,
+ postgres_addr: None,
+ metasrv_addr: None,
+ config_file: None,
+ })
+ .is_err());
+ }
}
diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs
index 5db41d96b2ca..9d65a565a95f 100644
--- a/src/cmd/src/error.rs
+++ b/src/cmd/src/error.rs
@@ -35,6 +35,9 @@ pub enum Error {
source: toml::de::Error,
backtrace: Backtrace,
},
+
+ #[snafu(display("Missing config, msg: {}", msg))]
+ MissingConfig { msg: String, backtrace: Backtrace },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -45,7 +48,9 @@ impl ErrorExt for Error {
Error::StartDatanode { source } => source.status_code(),
Error::StartFrontend { source } => source.status_code(),
Error::StartMetaServer { source } => source.status_code(),
- Error::ReadConfig { .. } | Error::ParseConfig { .. } => StatusCode::InvalidArguments,
+ Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
+ StatusCode::InvalidArguments
+ }
}
}
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 5286879c3ba7..4ce2685c8d6e 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -23,6 +23,13 @@ impl Default for ObjectStoreConfig {
}
}
+#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
+#[serde(rename_all = "lowercase")]
+pub enum Mode {
+ Standalone,
+ Distributed,
+}
+
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct DatanodeOptions {
pub node_id: u64,
@@ -36,6 +43,7 @@ pub struct DatanodeOptions {
pub meta_client_opts: MetaClientOpts,
pub wal_dir: String,
pub storage: ObjectStoreConfig,
+ pub mode: Mode,
}
impl Default for DatanodeOptions {
@@ -55,6 +63,7 @@ impl Default for DatanodeOptions {
common_time::util::current_time_millis()
),
storage: ObjectStoreConfig::default(),
+ mode: Mode::Standalone,
}
}
}
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 9c8a0897e2f3..bba80ac98a40 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -14,8 +14,8 @@ use storage::{config::EngineConfig as StorageEngineConfig, EngineImpl};
use table_engine::config::EngineConfig as TableEngineConfig;
use table_engine::engine::MitoEngine;
-use crate::datanode::{DatanodeOptions, MetaClientOpts, ObjectStoreConfig};
-use crate::error::{self, MetaClientInitSnafu, NewCatalogSnafu, Result};
+use crate::datanode::{DatanodeOptions, MetaClientOpts, Mode, ObjectStoreConfig};
+use crate::error::{self, CatalogSnafu, MetaClientInitSnafu, NewCatalogSnafu, Result};
use crate::heartbeat::HeartbeatTask;
use crate::script::ScriptExecutor;
use crate::server::grpc::plan::PhysicalPlanner;
@@ -34,8 +34,8 @@ pub struct Instance {
pub(crate) physical_planner: PhysicalPlanner,
pub(crate) script_executor: ScriptExecutor,
#[allow(unused)]
- pub(crate) meta_client: MetaClient,
- pub(crate) heartbeat_task: HeartbeatTask,
+ pub(crate) meta_client: Option<MetaClient>,
+ pub(crate) heartbeat_task: Option<HeartbeatTask>,
}
pub type InstanceRef = Arc<Instance>;
@@ -44,7 +44,13 @@ impl Instance {
pub async fn new(opts: &DatanodeOptions) -> Result<Self> {
let object_store = new_object_store(&opts.storage).await?;
let log_store = create_local_file_log_store(opts).await?;
- let meta_client = new_metasrv_client(opts.node_id, &opts.meta_client_opts).await?;
+
+ let meta_client = match opts.mode {
+ Mode::Standalone => None,
+ Mode::Distributed => {
+ Some(new_metasrv_client(opts.node_id, &opts.meta_client_opts).await?)
+ }
+ };
let table_engine = Arc::new(DefaultEngine::new(
TableEngineConfig::default(),
@@ -57,24 +63,42 @@ impl Instance {
));
// create remote catalog manager
- let catalog_manager = Arc::new(catalog::remote::RemoteCatalogManager::new(
- table_engine.clone(),
- opts.node_id,
- Arc::new(MetaKvBackend {
- client: meta_client.clone(),
- }),
- ));
+ let (catalog_manager, factory) = match opts.mode {
+ Mode::Standalone => {
+ let catalog = Arc::new(
+ catalog::local::LocalCatalogManager::try_new(table_engine.clone())
+ .await
+ .context(CatalogSnafu)?,
+ );
+ let factory = QueryEngineFactory::new(catalog.clone());
+ (catalog as CatalogManagerRef, factory)
+ }
+
+ Mode::Distributed => {
+ let catalog = Arc::new(catalog::remote::RemoteCatalogManager::new(
+ table_engine.clone(),
+ opts.node_id,
+ Arc::new(MetaKvBackend {
+ client: meta_client.as_ref().unwrap().clone(),
+ }),
+ ));
+ let factory = QueryEngineFactory::new(catalog.clone());
+ (catalog as CatalogManagerRef, factory)
+ }
+ };
- let factory = QueryEngineFactory::new(catalog_manager.clone());
let query_engine = factory.query_engine().clone();
let script_executor =
ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?;
- let heartbeat_task = HeartbeatTask::new(
- opts.node_id, /*node id not set*/
- opts.rpc_addr.clone(),
- meta_client.clone(),
- );
+ let heartbeat_task = match opts.mode {
+ Mode::Standalone => None,
+ Mode::Distributed => Some(HeartbeatTask::new(
+ opts.node_id, /*node id not set*/
+ opts.rpc_addr.clone(),
+ meta_client.as_ref().unwrap().clone(),
+ )),
+ };
Ok(Self {
query_engine: query_engine.clone(),
sql_handler: SqlHandler::new(table_engine, catalog_manager.clone()),
@@ -91,7 +115,9 @@ impl Instance {
.start()
.await
.context(NewCatalogSnafu)?;
- self.heartbeat_task.start().await?;
+ if let Some(task) = &self.heartbeat_task {
+ task.start().await?;
+ }
Ok(())
}
diff --git a/src/datanode/src/mock.rs b/src/datanode/src/mock.rs
index 78ab01cb0bf8..d06a8e9fe5d7 100644
--- a/src/datanode/src/mock.rs
+++ b/src/datanode/src/mock.rs
@@ -23,7 +23,7 @@ impl Instance {
use table_engine::table::test_util::MockEngine;
use table_engine::table::test_util::MockMitoEngine;
- let meta_client = mock_meta_client().await;
+ let meta_client = Some(mock_meta_client().await);
let (_dir, object_store) = new_test_object_store("setup_mock_engine_and_table").await;
let mock_engine = Arc::new(MockMitoEngine::new(
TableEngineConfig::default(),
@@ -46,8 +46,11 @@ impl Instance {
.await
.unwrap();
- let heartbeat_task =
- HeartbeatTask::new(0, "127.0.0.1:3302".to_string(), meta_client.clone());
+ let heartbeat_task = Some(HeartbeatTask::new(
+ 0,
+ "127.0.0.1:3302".to_string(),
+ meta_client.as_ref().unwrap().clone(),
+ ));
Ok(Self {
query_engine,
sql_handler,
@@ -95,8 +98,8 @@ impl Instance {
catalog_manager,
physical_planner: PhysicalPlanner::new(query_engine),
script_executor,
- meta_client,
- heartbeat_task,
+ meta_client: Some(meta_client),
+ heartbeat_task: Some(heartbeat_task),
})
}
}
|
fix
|
datanode start in standalone mode by default (#418)
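The commit above decides the running mode by matching on the pair of command-line options: both metasrv address and node id select distributed mode, neither selects standalone, and a lone option is rejected. A simplified sketch of that decision, with the snafu error type replaced by a plain `String` for brevity:

```rust
#[derive(Debug, PartialEq)]
enum Mode {
    Standalone,
    Distributed,
}

// Distributed mode requires both a metasrv address and a node id;
// supplying only one of them is a configuration error.
fn resolve_mode(metasrv_addr: Option<&str>, node_id: Option<u64>) -> Result<Mode, String> {
    match (metasrv_addr, node_id) {
        (Some(_), Some(_)) => Ok(Mode::Distributed),
        (None, None) => Ok(Mode::Standalone),
        (None, Some(_)) => Err("Missing metasrv address option".to_string()),
        (Some(_), None) => Err("Missing node id option".to_string()),
    }
}

fn main() {
    assert_eq!(resolve_mode(None, None).unwrap(), Mode::Standalone);
    assert_eq!(
        resolve_mode(Some("127.0.0.1:3002"), Some(42)).unwrap(),
        Mode::Distributed
    );
    assert!(resolve_mode(Some("127.0.0.1:3002"), None).is_err());
    assert!(resolve_mode(None, Some(42)).is_err());
}
```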
|
9d5be75a9cc1c8869c3f2f64dacbe6d82f957f88
|
2022-08-17 16:06:05
|
evenyag
|
refactor: Move test_util to datanode/src (#178)
| false
|
diff --git a/src/datanode/src/lib.rs b/src/datanode/src/lib.rs
index 398ee2747bf5..4fe0ef90175e 100644
--- a/src/datanode/src/lib.rs
+++ b/src/datanode/src/lib.rs
@@ -6,3 +6,5 @@ pub mod instance;
mod metric;
pub mod server;
mod sql;
+#[cfg(test)]
+mod tests;
diff --git a/src/datanode/src/tests.rs b/src/datanode/src/tests.rs
new file mode 100644
index 000000000000..4c321a7d1119
--- /dev/null
+++ b/src/datanode/src/tests.rs
@@ -0,0 +1,4 @@
+mod grpc_test;
+mod http_test;
+mod instance_test;
+mod test_util;
diff --git a/src/datanode/tests/grpc_test.rs b/src/datanode/src/tests/grpc_test.rs
similarity index 98%
rename from src/datanode/tests/grpc_test.rs
rename to src/datanode/src/tests/grpc_test.rs
index 8cf37e8fbbaa..0b91fa3ef86a 100644
--- a/src/datanode/tests/grpc_test.rs
+++ b/src/datanode/src/tests/grpc_test.rs
@@ -1,15 +1,15 @@
-mod test_util;
-
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use api::v1::{codec::InsertBatch, column, select_expr, Column, SelectExpr};
use client::{Client, Database, ObjectResult};
-use datanode::instance::Instance;
use servers::grpc::GrpcServer;
use servers::server::Server;
+use crate::instance::Instance;
+use crate::tests::test_util;
+
#[tokio::test]
async fn test_insert_and_select() {
common_telemetry::init_default_ut_logging();
diff --git a/src/datanode/tests/http_test.rs b/src/datanode/src/tests/http_test.rs
similarity index 97%
rename from src/datanode/tests/http_test.rs
rename to src/datanode/src/tests/http_test.rs
index 2720bdff661b..2a6c25e43b2a 100644
--- a/src/datanode/tests/http_test.rs
+++ b/src/datanode/src/tests/http_test.rs
@@ -1,14 +1,14 @@
-mod test_util;
-
use std::sync::Arc;
use axum::http::StatusCode;
use axum::Router;
use axum_test_helper::TestClient;
-use datanode::instance::Instance;
use servers::http::HttpServer;
use test_util::TestGuard;
+use crate::instance::Instance;
+use crate::tests::test_util;
+
async fn make_test_app() -> (Router, TestGuard) {
let (opts, guard) = test_util::create_tmp_dir_and_datanode_opts();
let instance = Arc::new(Instance::new(&opts).await.unwrap());
diff --git a/src/datanode/tests/instance_test.rs b/src/datanode/src/tests/instance_test.rs
similarity index 97%
rename from src/datanode/tests/instance_test.rs
rename to src/datanode/src/tests/instance_test.rs
index a259e7e7911f..2a86829db9b8 100644
--- a/src/datanode/tests/instance_test.rs
+++ b/src/datanode/src/tests/instance_test.rs
@@ -1,10 +1,10 @@
-mod test_util;
-
use arrow::array::UInt64Array;
use common_recordbatch::util;
-use datanode::instance::Instance;
use query::Output;
+use crate::instance::Instance;
+use crate::tests::test_util;
+
#[tokio::test]
async fn test_execute_insert() {
common_telemetry::init_default_ut_logging();
diff --git a/src/datanode/tests/test_util.rs b/src/datanode/src/tests/test_util.rs
similarity index 87%
rename from src/datanode/tests/test_util.rs
rename to src/datanode/src/tests/test_util.rs
index 38d9377dc18e..888e70bbb9d5 100644
--- a/src/datanode/tests/test_util.rs
+++ b/src/datanode/src/tests/test_util.rs
@@ -1,9 +1,6 @@
use std::sync::Arc;
use catalog::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
-use datanode::datanode::{DatanodeOptions, ObjectStoreConfig};
-use datanode::error::{CreateTableSnafu, Result};
-use datanode::instance::Instance;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, SchemaBuilder};
use snafu::ResultExt;
@@ -12,6 +9,10 @@ use table::engine::TableEngineRef;
use table::requests::CreateTableRequest;
use tempdir::TempDir;
+use crate::datanode::{DatanodeOptions, ObjectStoreConfig};
+use crate::error::{CreateTableSnafu, Result};
+use crate::instance::Instance;
+
/// Create a tmp dir(will be deleted once it goes out of scope.) and a default `DatanodeOptions`,
/// Only for test.
pub struct TestGuard {
@@ -38,10 +39,6 @@ pub fn create_tmp_dir_and_datanode_opts() -> (DatanodeOptions, TestGuard) {
)
}
-// It's actually not dead codes, at least been used in instance_test.rs and grpc_test.rs
-// However, clippy keeps warning us, so I temporary add an "allow" to bypass it.
-// TODO(LFC): further investigate why clippy falsely warning "dead_code"
-#[allow(dead_code)]
pub async fn create_test_table(instance: &Instance) -> Result<()> {
let column_schemas = vec![
ColumnSchema::new("host", ConcreteDataType::string_datatype(), false),
|
refactor
|
Move test_util to datanode/src (#178)
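The move above puts the test tree under `src/`, so the tests compile as part of the crate and can reach crate-private items with `use crate::...`; files under `tests/` build as separate crates and only see the public API. A minimal sketch of that layout, with a placeholder `double` function standing in for real crate internals:

```rust
// Crate-private item that an external `tests/` integration test could
// not reach, but an in-crate test module can.
fn double(x: u32) -> u32 {
    x * 2
}

// Compiled only for `cargo test`, mirroring `#[cfg(test)] mod tests;`
// in the diff above.
#[cfg(test)]
mod tests {
    use super::double;

    #[test]
    fn doubles() {
        assert_eq!(double(21), 42);
    }
}
```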
|
65f8b72d348fd3c69dfbd602d5905551f9be602c
|
2024-06-12 13:51:30
|
Yingwen
|
feat: Implement RegionScanner for SeqScan (#4060)
| false
|
diff --git a/src/common/time/src/timestamp.rs b/src/common/time/src/timestamp.rs
index a20a1925d040..a4aac7fd54ba 100644
--- a/src/common/time/src/timestamp.rs
+++ b/src/common/time/src/timestamp.rs
@@ -14,7 +14,7 @@
use core::default::Default;
use std::cmp::Ordering;
-use std::fmt::{Display, Formatter, Write};
+use std::fmt::{self, Display, Formatter, Write};
use std::hash::{Hash, Hasher};
use std::time::Duration;
@@ -41,7 +41,7 @@ use crate::{error, Interval};
/// # Note:
/// For values out of range, you can still store these timestamps, but while performing arithmetic
/// or formatting operations, it will return an error or just overflow.
-#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
+#[derive(Clone, Default, Copy, Serialize, Deserialize)]
pub struct Timestamp {
value: i64,
unit: TimeUnit,
@@ -498,6 +498,12 @@ impl From<Timestamp> for serde_json::Value {
}
}
+impl fmt::Debug for Timestamp {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ write!(f, "{}::{}", self.value, self.unit)
+ }
+}
+
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum TimeUnit {
Second,
@@ -1382,4 +1388,24 @@ mod tests {
Timestamp::MAX_SECOND.to_timezone_aware_string(Some(&Timezone::Named(Tz::UTC)))
);
}
+
+ #[test]
+ fn test_debug_timestamp() {
+ assert_eq!(
+ "1000::Second",
+ format!("{:?}", Timestamp::new(1000, TimeUnit::Second))
+ );
+ assert_eq!(
+ "1001::Millisecond",
+ format!("{:?}", Timestamp::new(1001, TimeUnit::Millisecond))
+ );
+ assert_eq!(
+ "1002::Microsecond",
+ format!("{:?}", Timestamp::new(1002, TimeUnit::Microsecond))
+ );
+ assert_eq!(
+ "1003::Nanosecond",
+ format!("{:?}", Timestamp::new(1003, TimeUnit::Nanosecond))
+ );
+ }
}
diff --git a/src/mito2/src/compaction/twcs.rs b/src/mito2/src/compaction/twcs.rs
index eebbd1f48def..f8b79cab523a 100644
--- a/src/mito2/src/compaction/twcs.rs
+++ b/src/mito2/src/compaction/twcs.rs
@@ -25,7 +25,7 @@ use crate::compaction::buckets::infer_time_bucket;
use crate::compaction::picker::{CompactionTask, Picker};
use crate::compaction::task::CompactionTaskImpl;
use crate::compaction::{get_expired_ssts, CompactionOutput, CompactionRequest};
-use crate::sst::file::{FileHandle, FileId};
+use crate::sst::file::{overlaps, FileHandle, FileId};
use crate::sst::version::LevelMeta;
/// `TwcsPicker` picks files of which the max timestamp are in the same time window as compaction
@@ -271,15 +271,6 @@ fn assign_to_windows<'a>(
windows.into_iter().map(|w| (w.time_window, w)).collect()
}
-/// Checks if two inclusive timestamp ranges overlap with each other.
-fn overlaps(l: &(Timestamp, Timestamp), r: &(Timestamp, Timestamp)) -> bool {
- let (l, r) = if l.0 <= r.0 { (l, r) } else { (r, l) };
- let (_, l_end) = l;
- let (r_start, _) = r;
-
- r_start <= l_end
-}
-
/// Finds the latest active writing window among all files.
/// Returns `None` when there are no files or all files are corrupted.
fn find_latest_window_in_seconds<'a>(
diff --git a/src/mito2/src/engine/append_mode_test.rs b/src/mito2/src/engine/append_mode_test.rs
index 77f2e4e67dd8..ed9d64ee2c04 100644
--- a/src/mito2/src/engine/append_mode_test.rs
+++ b/src/mito2/src/engine/append_mode_test.rs
@@ -82,7 +82,7 @@ async fn test_append_mode_write_query() {
.scan_region(region_id, ScanRequest::default())
.unwrap();
let seq_scan = scan.seq_scan().unwrap();
- let stream = seq_scan.build_stream().await.unwrap();
+ let stream = seq_scan.build_stream().unwrap();
let batches = RecordBatches::try_collect(stream).await.unwrap();
assert_eq!(expected, batches.pretty_print().unwrap());
}
diff --git a/src/mito2/src/engine/basic_test.rs b/src/mito2/src/engine/basic_test.rs
index 439b3a2fe0d3..9179d8a07411 100644
--- a/src/mito2/src/engine/basic_test.rs
+++ b/src/mito2/src/engine/basic_test.rs
@@ -330,6 +330,8 @@ async fn test_different_order_and_type() {
#[tokio::test]
async fn test_put_delete() {
+ common_telemetry::init_default_ut_logging();
+
let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;
diff --git a/src/mito2/src/engine/filter_deleted_test.rs b/src/mito2/src/engine/filter_deleted_test.rs
index 4d123a89b8ba..0a89f7b2ef50 100644
--- a/src/mito2/src/engine/filter_deleted_test.rs
+++ b/src/mito2/src/engine/filter_deleted_test.rs
@@ -87,7 +87,7 @@ async fn test_scan_without_filtering_deleted() {
let seq_scan = scan.scan_without_filter_deleted().unwrap();
- let stream = seq_scan.build_stream().await.unwrap();
+ let stream = seq_scan.build_stream().unwrap();
let batches = RecordBatches::try_collect(stream).await.unwrap();
let expected = "\
+-------+---------+---------------------+
diff --git a/src/mito2/src/memtable.rs b/src/mito2/src/memtable.rs
index 6df63f0e9973..b82032bbc8d2 100644
--- a/src/mito2/src/memtable.rs
+++ b/src/mito2/src/memtable.rs
@@ -64,11 +64,19 @@ impl Default for MemtableConfig {
pub struct MemtableStats {
/// The estimated bytes allocated by this memtable from heap.
estimated_bytes: usize,
- /// The time range that this memtable contains.
+ /// The time range that this memtable contains. It is None if
+ /// and only if the memtable is empty.
time_range: Option<(Timestamp, Timestamp)>,
}
impl MemtableStats {
+ /// Attaches the time range to the stats.
+ #[cfg(any(test, feature = "test"))]
+ pub(crate) fn with_time_range(mut self, time_range: Option<(Timestamp, Timestamp)>) -> Self {
+ self.time_range = time_range;
+ self
+ }
+
/// Returns the estimated bytes allocated by this memtable.
pub fn bytes_allocated(&self) -> usize {
self.estimated_bytes
diff --git a/src/mito2/src/read.rs b/src/mito2/src/read.rs
index ff16e72fd76d..042eaf2124f0 100644
--- a/src/mito2/src/read.rs
+++ b/src/mito2/src/read.rs
@@ -23,6 +23,7 @@ pub(crate) mod unordered_scan;
use std::collections::HashSet;
use std::sync::Arc;
+use std::time::Duration;
use api::v1::OpType;
use async_trait::async_trait;
@@ -50,6 +51,7 @@ use crate::error::{
ComputeArrowSnafu, ComputeVectorSnafu, ConvertVectorSnafu, InvalidBatchSnafu, Result,
};
use crate::memtable::BoxedBatchIterator;
+use crate::metrics::{READ_BATCHES_RETURN, READ_ROWS_RETURN, READ_STAGE_ELAPSED};
use crate::sst::parquet::reader::RowGroupReader;
/// Storage internal representation of a batch of rows for a primary key (time series).
@@ -744,6 +746,55 @@ impl<T: BatchReader + ?Sized> BatchReader for Box<T> {
}
}
+/// Metrics for scanners.
+#[derive(Debug, Default)]
+pub(crate) struct ScannerMetrics {
+ /// Duration to prepare the scan task.
+ prepare_scan_cost: Duration,
+ /// Duration to build parts.
+ build_parts_cost: Duration,
+ /// Duration to scan data.
+ scan_cost: Duration,
+ /// Duration to convert batches.
+ convert_cost: Duration,
+ /// Duration of the scan.
+ total_cost: Duration,
+ /// Number of batches returned.
+ num_batches: usize,
+ /// Number of rows returned.
+ num_rows: usize,
+}
+
+impl ScannerMetrics {
+ /// Sets and observes metrics on initializing parts.
+ fn observe_init_part(&mut self, build_parts_cost: Duration) {
+ self.build_parts_cost = build_parts_cost;
+
+ // Observes metrics.
+ READ_STAGE_ELAPSED
+ .with_label_values(&["prepare_scan"])
+ .observe(self.prepare_scan_cost.as_secs_f64());
+ READ_STAGE_ELAPSED
+ .with_label_values(&["build_parts"])
+ .observe(self.build_parts_cost.as_secs_f64());
+ }
+
+ /// Observes metrics on scanner finish.
+ fn observe_metrics_on_finish(&self) {
+ READ_STAGE_ELAPSED
+ .with_label_values(&["convert_rb"])
+ .observe(self.convert_cost.as_secs_f64());
+ READ_STAGE_ELAPSED
+ .with_label_values(&["scan"])
+ .observe(self.scan_cost.as_secs_f64());
+ READ_STAGE_ELAPSED
+ .with_label_values(&["total"])
+ .observe(self.total_cost.as_secs_f64());
+ READ_ROWS_RETURN.observe(self.num_rows as f64);
+ READ_BATCHES_RETURN.observe(self.num_batches as f64);
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs
index 2e6325e1b7c5..a185584d4358 100644
--- a/src/mito2/src/read/scan_region.rs
+++ b/src/mito2/src/read/scan_region.rs
@@ -16,16 +16,19 @@
use std::fmt;
use std::sync::Arc;
-use std::time::Instant;
+use std::time::{Duration, Instant};
use common_error::ext::BoxedError;
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::{debug, error, warn};
use common_time::range::TimestampRange;
-use store_api::region_engine::{RegionScannerRef, SinglePartitionScanner};
+use common_time::Timestamp;
+use datafusion::physical_plan::DisplayFormatType;
+use smallvec::SmallVec;
+use store_api::region_engine::RegionScannerRef;
use store_api::storage::ScanRequest;
use table::predicate::{build_time_range_predicate, Predicate};
-use tokio::sync::{mpsc, Semaphore};
+use tokio::sync::{mpsc, Mutex, Semaphore};
use tokio_stream::wrappers::ReceiverStream;
use crate::access_layer::AccessLayerRef;
@@ -34,13 +37,13 @@ use crate::cache::CacheManagerRef;
use crate::error::Result;
use crate::memtable::MemtableRef;
use crate::metrics::READ_SST_COUNT;
-use crate::read::compat::{CompatBatch, CompatReader};
+use crate::read::compat::{self, CompatBatch};
use crate::read::projection::ProjectionMapper;
use crate::read::seq_scan::SeqScan;
use crate::read::unordered_scan::UnorderedScan;
-use crate::read::{compat, Batch, Source};
+use crate::read::{Batch, Source};
use crate::region::version::VersionRef;
-use crate::sst::file::{FileHandle, FileMeta};
+use crate::sst::file::{overlaps, FileHandle, FileMeta};
use crate::sst::index::applier::builder::SstIndexApplierBuilder;
use crate::sst::index::applier::SstIndexApplierRef;
use crate::sst::parquet::file_range::FileRange;
@@ -57,7 +60,7 @@ impl Scanner {
/// Returns a [SendableRecordBatchStream] to retrieve scan results from all partitions.
pub(crate) async fn scan(&self) -> Result<SendableRecordBatchStream, BoxedError> {
match self {
- Scanner::Seq(seq_scan) => seq_scan.build_stream().await.map_err(BoxedError::new),
+ Scanner::Seq(seq_scan) => seq_scan.build_stream(),
Scanner::Unordered(unordered_scan) => unordered_scan.build_stream().await,
}
}
@@ -65,11 +68,7 @@ impl Scanner {
/// Returns a [RegionScanner] to scan the region.
pub(crate) async fn region_scanner(self) -> Result<RegionScannerRef> {
match self {
- Scanner::Seq(seq_scan) => {
- let stream = seq_scan.build_stream().await?;
- let scanner = Arc::new(SinglePartitionScanner::new(stream));
- Ok(scanner)
- }
+ Scanner::Seq(seq_scan) => Ok(Arc::new(seq_scan)),
Scanner::Unordered(unordered_scan) => Ok(Arc::new(unordered_scan)),
}
}
@@ -221,9 +220,7 @@ impl ScanRegion {
/// Scan sequentially.
pub(crate) fn seq_scan(self) -> Result<SeqScan> {
let input = self.scan_input(true)?;
- let seq_scan = SeqScan::new(input);
-
- Ok(seq_scan)
+ Ok(SeqScan::new(input))
}
/// Unordered scan.
@@ -235,8 +232,7 @@ impl ScanRegion {
#[cfg(test)]
pub(crate) fn scan_without_filter_deleted(self) -> Result<SeqScan> {
let input = self.scan_input(false)?;
- let scan = SeqScan::new(input);
- Ok(scan)
+ Ok(SeqScan::new(input))
}
/// Creates a scan input.
@@ -263,9 +259,8 @@ impl ScanRegion {
return false;
}
let stats = mem.stats();
- let Some((start, end)) = stats.time_range() else {
- return true;
- };
+ // Safety: the memtable is not empty.
+ let (start, end) = stats.time_range().unwrap();
// The time range of the memtable is inclusive.
let memtable_range = TimestampRange::new_inclusive(Some(start), Some(end));
@@ -364,13 +359,6 @@ pub(crate) struct ScanParallism {
pub(crate) channel_size: usize,
}
-impl ScanParallism {
- /// Returns true if we allow parallel scan.
- pub(crate) fn allow_parallel_scan(&self) -> bool {
- self.parallelism > 1
- }
-}
-
/// Returns true if the time range of a SST `file` matches the `predicate`.
fn file_in_range(file: &FileHandle, predicate: &TimestampRange) -> bool {
if predicate == &TimestampRange::min_to_max() {
@@ -509,60 +497,15 @@ impl ScanInput {
self
}
- /// Builds and returns sources to read.
- pub(crate) async fn build_sources(&self) -> Result<Vec<Source>> {
- let mut sources = Vec::with_capacity(self.memtables.len() + self.files.len());
- for mem in &self.memtables {
- let iter = mem.iter(Some(self.mapper.column_ids()), self.predicate.clone())?;
- sources.push(Source::Iter(iter));
- }
- for file in &self.files {
- let maybe_reader = self
- .access_layer
- .read_sst(file.clone())
- .predicate(self.predicate.clone())
- .time_range(self.time_range)
- .projection(Some(self.mapper.column_ids().to_vec()))
- .cache(self.cache_manager.clone())
- .index_applier(self.index_applier.clone())
- .expected_metadata(Some(self.mapper.metadata().clone()))
- .build()
- .await;
- let reader = match maybe_reader {
- Ok(reader) => reader,
- Err(e) => {
- if e.is_object_not_found() && self.ignore_file_not_found {
- error!(e; "File to scan does not exist, region_id: {}, file: {}", file.region_id(), file.file_id());
- continue;
- } else {
- return Err(e);
- }
- }
- };
- if compat::has_same_columns(self.mapper.metadata(), reader.metadata()) {
- sources.push(Source::Reader(Box::new(reader)));
- } else {
- // They have different schema. We need to adapt the batch first so the
- // mapper can convert it.
- let compat_reader =
- CompatReader::new(&self.mapper, reader.metadata().clone(), reader)?;
- sources.push(Source::Reader(Box::new(compat_reader)));
- }
- }
-
- READ_SST_COUNT.observe(self.files.len() as f64);
-
- Ok(sources)
- }
-
/// Scans sources in parallel.
///
/// # Panics if the input doesn't allow parallel scan.
- pub(crate) async fn build_parallel_sources(&self) -> Result<Vec<Source>> {
- assert!(self.parallelism.allow_parallel_scan());
- // Scall all memtables and SSTs.
- let sources = self.build_sources().await?;
- let semaphore = Arc::new(Semaphore::new(self.parallelism.parallelism));
+ pub(crate) fn create_parallel_sources(
+ &self,
+ sources: Vec<Source>,
+ semaphore: Arc<Semaphore>,
+ ) -> Result<Vec<Source>> {
+ debug_assert!(self.parallelism.parallelism > 1);
// Spawn a task for each source.
let sources = sources
.into_iter()
@@ -576,7 +519,7 @@ impl ScanInput {
Ok(sources)
}
- /// Prunes file ranges to scan and adds them tothe `collector`.
+ /// Prunes file ranges to scan and adds them to the `collector`.
pub(crate) async fn prune_file_ranges(
&self,
collector: &mut impl FileRangeCollector,
@@ -641,7 +584,7 @@ impl ScanInput {
common_runtime::spawn_read(async move {
loop {
// We release the permit before sending result to avoid the task waiting on
- // the channel with the permit holded
+ // the channel with the permit held.
let maybe_batch = {
// Safety: We never close the semaphore.
let _permit = semaphore.acquire().await.unwrap();
@@ -680,6 +623,10 @@ impl ScanInput {
}
}
+/// Groups of file ranges. Each group in the list contains multiple file
+/// ranges to scan. File ranges in the same group may come from different files.
+pub(crate) type FileRangesGroup = SmallVec<[Vec<FileRange>; 4]>;
+
/// A partition of a scanner to read.
/// It contains memtables and file ranges to scan.
#[derive(Default)]
@@ -688,17 +635,60 @@ pub(crate) struct ScanPart {
/// We scan the whole memtable now. We might scan a range of the memtable in the future.
pub(crate) memtables: Vec<MemtableRef>,
/// File ranges to scan.
- pub(crate) file_ranges: Vec<FileRange>,
+ pub(crate) file_ranges: FileRangesGroup,
+ /// Optional time range of the part (inclusive).
+ pub(crate) time_range: Option<(Timestamp, Timestamp)>,
}
impl fmt::Debug for ScanPart {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
- "ScanPart({} memtables, {} file ranges)",
+ "ScanPart({} memtables, {} file ranges",
self.memtables.len(),
- self.file_ranges.len()
- )
+ self.file_ranges
+ .iter()
+ .map(|ranges| ranges.len())
+ .sum::<usize>(),
+ )?;
+ if let Some(time_range) = &self.time_range {
+ write!(f, ", time range: {:?})", time_range)
+ } else {
+ write!(f, ")")
+ }
+ }
+}
+
+impl ScanPart {
+ /// Returns true if the time range given `part` overlaps with this part.
+ pub(crate) fn overlaps(&self, part: &ScanPart) -> bool {
+ let (Some(current_range), Some(part_range)) = (self.time_range, part.time_range) else {
+ return true;
+ };
+
+        overlaps(&current_range, &part_range)
+ }
+
+ /// Merges given `part` to this part.
+ pub(crate) fn merge(&mut self, mut part: ScanPart) {
+ self.memtables.append(&mut part.memtables);
+ self.file_ranges.append(&mut part.file_ranges);
+ let Some(part_range) = part.time_range else {
+ return;
+ };
+ let Some(current_range) = self.time_range else {
+ self.time_range = part.time_range;
+ return;
+ };
+ let start = current_range.0.min(part_range.0);
+ let end = current_range.1.max(part_range.1);
+ self.time_range = Some((start, end));
+ }
+
+ /// Returns true if the we can split the part into multiple parts
+ /// and preserving order.
+ pub(crate) fn can_split_preserve_order(&self) -> bool {
+ self.memtables.is_empty() && self.file_ranges.len() == 1 && self.file_ranges[0].len() > 1
}
}
@@ -711,3 +701,105 @@ pub(crate) trait FileRangeCollector {
file_ranges: impl Iterator<Item = FileRange>,
);
}
+
+/// Optional list of [ScanPart]s.
+#[derive(Default)]
+pub(crate) struct ScanPartList(pub(crate) Option<Vec<ScanPart>>);
+
+impl fmt::Debug for ScanPartList {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match &self.0 {
+ Some(parts) => write!(f, "{:?}", parts),
+ None => write!(f, "[]"),
+ }
+ }
+}
+
+impl ScanPartList {
+ /// Returns true if the list is None.
+ pub(crate) fn is_none(&self) -> bool {
+ self.0.is_none()
+ }
+
+ /// Sets parts to the list.
+ pub(crate) fn set_parts(&mut self, parts: Vec<ScanPart>) {
+ self.0 = Some(parts);
+ }
+
+ /// Gets the part by index, returns None if the index is out of bound.
+ /// # Panics
+ /// Panics if parts are not initialized.
+ pub(crate) fn get_part(&mut self, index: usize) -> Option<&ScanPart> {
+ let parts = self.0.as_ref().unwrap();
+ parts.get(index)
+ }
+
+ /// Returns the number of parts.
+ pub(crate) fn len(&self) -> usize {
+ self.0.as_ref().map_or(0, |parts| parts.len())
+ }
+
+ /// Returns the number of memtables.
+ pub(crate) fn num_memtables(&self) -> usize {
+ self.0.as_ref().map_or(0, |parts| {
+ parts.iter().map(|part| part.memtables.len()).sum()
+ })
+ }
+
+ /// Returns the number of file ranges.
+ pub(crate) fn num_file_ranges(&self) -> usize {
+ self.0.as_ref().map_or(0, |parts| {
+ parts.iter().map(|part| part.file_ranges.len()).sum()
+ })
+ }
+}
+
+/// Context shared by different streams from a scanner.
+/// It contains the input and distributes input to multiple parts
+/// to scan.
+pub(crate) struct StreamContext {
+ /// Input memtables and files.
+ pub(crate) input: ScanInput,
+ /// Parts to scan.
+ /// The scanner builds parts to scan from the input lazily.
+ /// The mutex is used to ensure the parts are only built once.
+ pub(crate) parts: Mutex<ScanPartList>,
+
+ // Metrics:
+ /// The start time of the query.
+ pub(crate) query_start: Instant,
+ /// Time elapsed before creating the scanner.
+ pub(crate) prepare_scan_cost: Duration,
+}
+
+impl StreamContext {
+ /// Creates a new [StreamContext].
+ pub(crate) fn new(input: ScanInput) -> Self {
+ let query_start = input.query_start.unwrap_or_else(Instant::now);
+ let prepare_scan_cost = query_start.elapsed();
+
+ Self {
+ input,
+ parts: Mutex::new(ScanPartList::default()),
+ query_start,
+ prepare_scan_cost,
+ }
+ }
+
+ /// Format parts for explain.
+ pub(crate) fn format_parts(&self, t: DisplayFormatType, f: &mut fmt::Formatter) -> fmt::Result {
+ match self.parts.try_lock() {
+ Ok(inner) => match t {
+ DisplayFormatType::Default => write!(
+ f,
+ "partition_count={} ({} memtables, {} file ranges)",
+ inner.len(),
+ inner.num_memtables(),
+ inner.num_file_ranges()
+ ),
+ DisplayFormatType::Verbose => write!(f, "{:?}", &*inner),
+ },
+ Err(_) => write!(f, "<locked>"),
+ }
+ }
+}
diff --git a/src/mito2/src/read/seq_scan.rs b/src/mito2/src/read/seq_scan.rs
index 2277a8df32f9..cc810ba0d8b6 100644
--- a/src/mito2/src/read/seq_scan.rs
+++ b/src/mito2/src/read/seq_scan.rs
@@ -14,172 +14,550 @@
//! Sequential scan.
-use std::time::{Duration, Instant};
+use std::fmt;
+use std::sync::Arc;
+use std::time::Instant;
use async_stream::try_stream;
use common_error::ext::BoxedError;
use common_recordbatch::error::ExternalSnafu;
-use common_recordbatch::{RecordBatch, RecordBatchStreamWrapper, SendableRecordBatchStream};
-use common_telemetry::{debug, tracing};
+use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
+use common_telemetry::debug;
+use datafusion::physical_plan::{DisplayAs, DisplayFormatType};
+use datatypes::schema::SchemaRef;
+use smallvec::smallvec;
use snafu::ResultExt;
+use store_api::region_engine::{RegionScanner, ScannerPartitioning, ScannerProperties};
+use store_api::storage::ColumnId;
+use table::predicate::Predicate;
+use tokio::sync::Semaphore;
-use crate::cache::CacheManager;
use crate::error::Result;
-use crate::metrics::{READ_BATCHES_RETURN, READ_ROWS_RETURN, READ_STAGE_ELAPSED};
-use crate::read::merge::MergeReaderBuilder;
-use crate::read::projection::ProjectionMapper;
-use crate::read::scan_region::ScanInput;
-use crate::read::{BatchReader, BoxedBatchReader};
+use crate::memtable::MemtableRef;
+use crate::read::merge::{MergeReader, MergeReaderBuilder};
+use crate::read::scan_region::{
+ FileRangeCollector, ScanInput, ScanPart, ScanPartList, StreamContext,
+};
+use crate::read::{BatchReader, BoxedBatchReader, ScannerMetrics, Source};
+use crate::sst::file::FileMeta;
+use crate::sst::parquet::file_range::FileRange;
+use crate::sst::parquet::reader::ReaderMetrics;
/// Scans a region and returns rows in a sorted sequence.
///
-/// The output order is always `order by primary key, time index`.
+/// The output order is always `order by primary keys, time index`.
pub struct SeqScan {
- input: ScanInput,
+ /// Properties of the scanner.
+ properties: ScannerProperties,
+ /// Context of streams.
+ stream_ctx: Arc<StreamContext>,
+ /// Semaphore to control scan parallelism of files.
+ /// Streams created by the scanner share the same semaphore.
+ semaphore: Arc<Semaphore>,
}
impl SeqScan {
/// Creates a new [SeqScan].
- #[must_use]
- pub(crate) fn new(input: ScanInput) -> SeqScan {
- SeqScan { input }
+ pub(crate) fn new(input: ScanInput) -> Self {
+ let parallelism = input.parallelism.parallelism.max(1);
+ let properties = ScannerProperties::new(ScannerPartitioning::Unknown(parallelism));
+ let stream_ctx = Arc::new(StreamContext::new(input));
+
+ Self {
+ properties,
+ stream_ctx,
+ semaphore: Arc::new(Semaphore::new(parallelism)),
+ }
}
/// Builds a stream for the query.
- pub async fn build_stream(&self) -> Result<SendableRecordBatchStream> {
- let mut metrics = Metrics::default();
- let build_start = Instant::now();
- let query_start = self.input.query_start.unwrap_or(build_start);
- metrics.prepare_scan_cost = query_start.elapsed();
- let use_parallel = self.use_parallel_reader();
- // Scans all memtables and SSTs. Builds a merge reader to merge results.
- let mut reader = if use_parallel {
- self.build_parallel_reader().await?
+ pub fn build_stream(&self) -> Result<SendableRecordBatchStream, BoxedError> {
+ self.scan_partition_opt(None)
+ }
+
+ /// Builds a [BoxedBatchReader] from sequential scan for compaction.
+ pub async fn build_reader(&self) -> Result<BoxedBatchReader> {
+ let mut metrics = ScannerMetrics {
+ prepare_scan_cost: self.stream_ctx.prepare_scan_cost,
+ ..Default::default()
+ };
+ let maybe_reader =
+ Self::build_merge_reader(&self.stream_ctx, None, self.semaphore.clone(), &mut metrics)
+ .await?;
+ // Safety: `build_merge_reader()` always returns a reader if partition is None.
+ let reader = maybe_reader.unwrap();
+ Ok(Box::new(reader))
+ }
+
+ /// Builds sources from a [ScanPart].
+ fn build_part_sources(
+ part: &ScanPart,
+ projection: Option<&[ColumnId]>,
+ predicate: Option<&Predicate>,
+ sources: &mut Vec<Source>,
+ ) -> Result<()> {
+ sources.reserve(part.memtables.len() + part.file_ranges.len());
+ // Read memtables.
+ for mem in &part.memtables {
+ let iter = mem.iter(projection, predicate.cloned())?;
+ sources.push(Source::Iter(iter));
+ }
+ // Read files.
+ for file in &part.file_ranges {
+ if file.is_empty() {
+ continue;
+ }
+
+ // Creates a stream to read the file.
+ let ranges = file.clone();
+ let stream = try_stream! {
+ let mut reader_metrics = ReaderMetrics::default();
+ // Safety: We checked whether it is empty before.
+ let file_id = ranges[0].file_handle().file_id();
+ let region_id = ranges[0].file_handle().region_id();
+ let range_num = ranges.len();
+ for range in ranges {
+ let mut reader = range.reader().await?;
+ let compat_batch = range.compat_batch();
+ while let Some(mut batch) = reader.next_batch().await? {
+ if let Some(compat) = compat_batch {
+ batch = compat
+ .compat_batch(batch)?;
+ }
+
+ yield batch;
+ }
+ reader_metrics.merge_from(reader.metrics());
+ }
+ debug!(
+ "Seq scan region {}, file {}, {} ranges finished, metrics: {:?}",
+ region_id, file_id, range_num, reader_metrics
+ );
+ };
+ let stream = Box::pin(stream);
+ sources.push(Source::Stream(stream));
+ }
+
+ Ok(())
+ }
+
+ /// Builds a merge reader.
+ /// If `partition` is None, reads all partitions.
+ /// If the `partition` is out of bound, returns None.
+ async fn build_merge_reader(
+ stream_ctx: &StreamContext,
+ partition: Option<usize>,
+ semaphore: Arc<Semaphore>,
+ metrics: &mut ScannerMetrics,
+ ) -> Result<Option<MergeReader>> {
+ let mut parts = stream_ctx.parts.lock().await;
+ maybe_init_parts(&stream_ctx.input, &mut parts, metrics).await?;
+
+ let input = &stream_ctx.input;
+ let mut sources = Vec::new();
+ if let Some(index) = partition {
+ let Some(part) = parts.get_part(index) else {
+ return Ok(None);
+ };
+
+ Self::build_part_sources(
+ part,
+ Some(input.mapper.column_ids()),
+ input.predicate.as_ref(),
+ &mut sources,
+ )?;
} else {
- self.build_reader().await?
+ // Safety: We initialized parts before.
+ for part in parts.0.as_ref().unwrap() {
+ Self::build_part_sources(
+ part,
+ Some(input.mapper.column_ids()),
+ input.predicate.as_ref(),
+ &mut sources,
+ )?;
+ }
+ }
+
+ if stream_ctx.input.parallelism.parallelism > 1 {
+ // Read sources in parallel. We always spawn a task so we can control the parallelism
+ // by the semaphore.
+ sources = stream_ctx
+ .input
+ .create_parallel_sources(sources, semaphore.clone())?;
+ }
+
+ let dedup = !stream_ctx.input.append_mode;
+ let mut builder =
+ MergeReaderBuilder::from_sources(sources, dedup, stream_ctx.input.filter_deleted);
+ builder.build().await.map(Some)
+ }
+
+ /// Scans one partition or all partitions.
+ fn scan_partition_opt(
+ &self,
+ partition: Option<usize>,
+ ) -> Result<SendableRecordBatchStream, BoxedError> {
+ let mut metrics = ScannerMetrics {
+ prepare_scan_cost: self.stream_ctx.prepare_scan_cost,
+ ..Default::default()
};
- metrics.build_reader_cost = build_start.elapsed();
- READ_STAGE_ELAPSED
- .with_label_values(&["prepare_scan"])
- .observe(metrics.prepare_scan_cost.as_secs_f64());
- READ_STAGE_ELAPSED
- .with_label_values(&["build_reader"])
- .observe(metrics.build_reader_cost.as_secs_f64());
-
- // Creates a stream to poll the batch reader and convert batch into record batch.
- let mapper = self.input.mapper.clone();
- let cache_manager = self.input.cache_manager.clone();
- let parallelism = self.input.parallelism.parallelism;
+ let stream_ctx = self.stream_ctx.clone();
+ let semaphore = self.semaphore.clone();
let stream = try_stream! {
- let cache = cache_manager.as_ref().map(|cache| cache.as_ref());
- while let Some(batch) =
- Self::fetch_record_batch(&mut reader, &mapper, cache, &mut metrics).await?
+ let maybe_reader = Self::build_merge_reader(&stream_ctx, partition, semaphore, &mut metrics)
+ .await
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?;
+ let Some(mut reader) = maybe_reader else {
+ return;
+ };
+ let cache = stream_ctx.input.cache_manager.as_deref();
+ let mut fetch_start = Instant::now();
+ while let Some(batch) = reader
+ .next_batch()
+ .await
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?
{
+ metrics.scan_cost += fetch_start.elapsed();
metrics.num_batches += 1;
metrics.num_rows += batch.num_rows();
- yield batch;
+
+ let convert_start = Instant::now();
+ let record_batch = stream_ctx.input.mapper.convert(&batch, cache)?;
+ metrics.convert_cost += convert_start.elapsed();
+ yield record_batch;
+
+ fetch_start = Instant::now();
}
+ metrics.scan_cost += fetch_start.elapsed();
+ metrics.total_cost = stream_ctx.query_start.elapsed();
+ metrics.observe_metrics_on_finish();
- // Update metrics.
- metrics.total_cost = query_start.elapsed();
- READ_STAGE_ELAPSED.with_label_values(&["convert_rb"]).observe(metrics.convert_cost.as_secs_f64());
- READ_STAGE_ELAPSED.with_label_values(&["scan"]).observe(metrics.scan_cost.as_secs_f64());
- READ_STAGE_ELAPSED.with_label_values(&["total"]).observe(metrics.total_cost.as_secs_f64());
- READ_ROWS_RETURN.observe(metrics.num_rows as f64);
- READ_BATCHES_RETURN.observe(metrics.num_batches as f64);
debug!(
- "Seq scan finished, region_id: {:?}, metrics: {:?}, use_parallel: {}, parallelism: {}",
- mapper.metadata().region_id, metrics, use_parallel, parallelism,
+ "Seq scan finished, region_id: {:?}, partition: {:?}, metrics: {:?}",
+ stream_ctx.input.mapper.metadata().region_id, partition, metrics,
);
};
+
let stream = Box::pin(RecordBatchStreamWrapper::new(
- self.input.mapper.output_schema(),
+ self.stream_ctx.input.mapper.output_schema(),
Box::pin(stream),
));
Ok(stream)
}
+}
- /// Builds a [BoxedBatchReader] from sequential scan.
- pub async fn build_reader(&self) -> Result<BoxedBatchReader> {
- // Scans all memtables and SSTs. Builds a merge reader to merge results.
- let sources = self.input.build_sources().await?;
- let dedup = !self.input.append_mode;
- let mut builder =
- MergeReaderBuilder::from_sources(sources, dedup, self.input.filter_deleted);
- let reader = builder.build().await?;
- Ok(Box::new(reader))
+impl RegionScanner for SeqScan {
+ fn properties(&self) -> &ScannerProperties {
+ &self.properties
}
- /// Builds a [BoxedBatchReader] that can scan memtables and SSTs in parallel.
- async fn build_parallel_reader(&self) -> Result<BoxedBatchReader> {
- let sources = self.input.build_parallel_sources().await?;
- let dedup = !self.input.append_mode;
- let mut builder =
- MergeReaderBuilder::from_sources(sources, dedup, self.input.filter_deleted);
- let reader = builder.build().await?;
- Ok(Box::new(reader))
+ fn schema(&self) -> SchemaRef {
+ self.stream_ctx.input.mapper.output_schema()
}
- /// Returns whether to use a parallel reader.
- fn use_parallel_reader(&self) -> bool {
- self.input.parallelism.allow_parallel_scan()
- && (self.input.files.len() + self.input.memtables.len()) > 1
+ fn scan_partition(&self, partition: usize) -> Result<SendableRecordBatchStream, BoxedError> {
+ self.scan_partition_opt(Some(partition))
}
+}
- /// Fetch a batch from the reader and convert it into a record batch.
- #[tracing::instrument(skip_all, level = "trace")]
- async fn fetch_record_batch(
- reader: &mut dyn BatchReader,
- mapper: &ProjectionMapper,
- cache: Option<&CacheManager>,
- metrics: &mut Metrics,
- ) -> common_recordbatch::error::Result<Option<RecordBatch>> {
- let start = Instant::now();
+impl DisplayAs for SeqScan {
+ fn fmt_as(&self, t: DisplayFormatType, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "SeqScan: ")?;
+ self.stream_ctx.format_parts(t, f)
+ }
+}
- let Some(batch) = reader
- .next_batch()
- .await
- .map_err(BoxedError::new)
- .context(ExternalSnafu)?
- else {
- metrics.scan_cost += start.elapsed();
+impl fmt::Debug for SeqScan {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SeqScan")
+ .field("parts", &self.stream_ctx.parts)
+ .field("prepare_scan_cost", &self.stream_ctx.prepare_scan_cost)
+ .finish()
+ }
+}
+
+#[cfg(test)]
+impl SeqScan {
+ /// Returns the input.
+ pub(crate) fn input(&self) -> &ScanInput {
+ &self.stream_ctx.input
+ }
+}
- return Ok(None);
+/// Initializes parts if they are not built yet.
+async fn maybe_init_parts(
+ input: &ScanInput,
+ part_list: &mut ScanPartList,
+ metrics: &mut ScannerMetrics,
+) -> Result<()> {
+ if part_list.is_none() {
+ let now = Instant::now();
+ let mut distributor = SeqDistributor::default();
+ input.prune_file_ranges(&mut distributor).await?;
+ part_list
+ .set_parts(distributor.build_parts(&input.memtables, input.parallelism.parallelism));
+
+ metrics.observe_init_part(now.elapsed());
+ }
+ Ok(())
+}
+
+/// Builds [ScanPart]s that preserves order.
+#[derive(Default)]
+pub(crate) struct SeqDistributor {
+ parts: Vec<ScanPart>,
+}
+
+impl FileRangeCollector for SeqDistributor {
+ fn append_file_ranges(
+ &mut self,
+ file_meta: &FileMeta,
+ file_ranges: impl Iterator<Item = FileRange>,
+ ) {
+ // Creates a [ScanPart] for each file.
+ let ranges: Vec<_> = file_ranges.collect();
+ if ranges.is_empty() {
+ // No ranges to read.
+ return;
+ }
+ let part = ScanPart {
+ memtables: Vec::new(),
+ file_ranges: smallvec![ranges],
+ time_range: Some(file_meta.time_range),
};
+ self.parts.push(part);
+ }
+}
+
+impl SeqDistributor {
+ /// Groups file ranges and memtables by time ranges.
+ /// The output number of parts may be `<= parallelism`. If `parallelism` is 0, it will be set to 1.
+ ///
+ /// Output parts have non-overlapping time ranges.
+ fn build_parts(mut self, memtables: &[MemtableRef], parallelism: usize) -> Vec<ScanPart> {
+ // Creates a part for each memtable.
+ for mem in memtables {
+ let stats = mem.stats();
+ let part = ScanPart {
+ memtables: vec![mem.clone()],
+ file_ranges: smallvec![],
+ time_range: stats.time_range(),
+ };
+ self.parts.push(part);
+ }
+
+ let parallelism = parallelism.max(1);
+ let parts = group_parts_by_range(self.parts);
+ let parts = maybe_split_parts(parts, parallelism);
+ // Ensures it doesn't return more parts than `parallelism`.
+ maybe_merge_parts(parts, parallelism)
+ }
+}
+
+/// Groups parts by time range. It may generate parts more than parallelism.
+/// All time ranges are not None.
+fn group_parts_by_range(mut parts: Vec<ScanPart>) -> Vec<ScanPart> {
+ if parts.is_empty() {
+ return Vec::new();
+ }
+
+ // Sorts parts by time range.
+ parts.sort_unstable_by(|a, b| {
+ // Safety: time ranges of parts from [SeqDistributor] are not None.
+ let a = a.time_range.unwrap();
+ let b = b.time_range.unwrap();
+ a.0.cmp(&b.0).then_with(|| b.1.cmp(&a.1))
+ });
+ let mut part_in_range = None;
+ // Parts with exclusive time ranges.
+ let mut part_groups = Vec::new();
+ for part in parts {
+ let Some(mut prev_part) = part_in_range.take() else {
+ part_in_range = Some(part);
+ continue;
+ };
+
+ if prev_part.overlaps(&part) {
+ prev_part.merge(part);
+ part_in_range = Some(prev_part);
+ } else {
+ // A new group.
+ part_groups.push(prev_part);
+ part_in_range = Some(part);
+ }
+ }
+ if let Some(part) = part_in_range {
+ part_groups.push(part);
+ }
+
+ part_groups
+}
+
+/// Merges parts by parallelism.
+/// It merges parts if the number of parts is greater than `parallelism`.
+fn maybe_merge_parts(mut parts: Vec<ScanPart>, parallelism: usize) -> Vec<ScanPart> {
+ assert!(parallelism > 0);
+ if parts.len() <= parallelism {
+ // No need to merge parts.
+ return parts;
+ }
- let convert_start = Instant::now();
- let record_batch = mapper.convert(&batch, cache)?;
- metrics.convert_cost += convert_start.elapsed();
- metrics.scan_cost += start.elapsed();
+ // Sort parts by number of memtables and ranges in reverse order.
+ parts.sort_unstable_by(|a, b| {
+ a.memtables
+ .len()
+ .cmp(&b.memtables.len())
+ .then_with(|| {
+ let a_ranges_len = a
+ .file_ranges
+ .iter()
+ .map(|ranges| ranges.len())
+ .sum::<usize>();
+ let b_ranges_len = b
+ .file_ranges
+ .iter()
+ .map(|ranges| ranges.len())
+ .sum::<usize>();
+ a_ranges_len.cmp(&b_ranges_len)
+ })
+ .reverse()
+ });
- Ok(Some(record_batch))
+ let parts_to_reduce = parts.len() - parallelism;
+ for _ in 0..parts_to_reduce {
+ // Safety: We ensure `parts.len() > parallelism`.
+ let part = parts.pop().unwrap();
+ parts.last_mut().unwrap().merge(part);
}
+
+ parts
}
-/// Metrics for [SeqScan].
-#[derive(Debug, Default)]
-struct Metrics {
- /// Duration to prepare the scan task.
- prepare_scan_cost: Duration,
- /// Duration to build the reader.
- build_reader_cost: Duration,
- /// Duration to scan data.
- scan_cost: Duration,
- /// Duration to convert batches.
- convert_cost: Duration,
- /// Duration of the scan.
- total_cost: Duration,
- /// Number of batches returned.
- num_batches: usize,
- /// Number of rows returned.
- num_rows: usize,
+/// Splits parts by parallelism.
+/// It splits a part if it only scans one file and doesn't scan any memtable.
+fn maybe_split_parts(mut parts: Vec<ScanPart>, parallelism: usize) -> Vec<ScanPart> {
+ assert!(parallelism > 0);
+ if parts.len() >= parallelism {
+ // No need to split parts.
+ return parts;
+ }
+
+ let has_part_to_split = parts.iter().any(|part| part.can_split_preserve_order());
+ if !has_part_to_split {
+ // No suitable parts to split.
+ return parts;
+ }
+
+ // Sorts parts by the number of ranges in the first file.
+ parts.sort_unstable_by(|a, b| {
+ let a_len = a.file_ranges.first().map(|file| file.len()).unwrap_or(0);
+ let b_len = b.file_ranges.first().map(|file| file.len()).unwrap_or(0);
+ a_len.cmp(&b_len).reverse()
+ });
+ let num_parts_to_split = parallelism - parts.len();
+ let mut output_parts = Vec::with_capacity(parallelism);
+ // Split parts up to num_parts_to_split.
+ for part in parts.iter_mut() {
+ if !part.can_split_preserve_order() {
+ continue;
+ }
+ // Safety: `can_split_preserve_order()` ensures file_ranges.len() == 1.
+ // Splits part into `num_parts_to_split + 1` new parts if possible.
+ let target_part_num = num_parts_to_split + 1;
+ let ranges_per_part = (part.file_ranges[0].len() + target_part_num - 1) / target_part_num;
+ // `can_split_preserve_order()` ensures part.file_ranges[0].len() > 1.
+ assert!(ranges_per_part > 0);
+ for ranges in part.file_ranges[0].chunks(ranges_per_part) {
+ let new_part = ScanPart {
+ memtables: Vec::new(),
+ file_ranges: smallvec![ranges.to_vec()],
+ time_range: part.time_range,
+ };
+ output_parts.push(new_part);
+ }
+ // Replace the current part with the last output part as we will put the current part
+ // into the output parts later.
+ *part = output_parts.pop().unwrap();
+ if output_parts.len() >= num_parts_to_split {
+ // We already split enough parts.
+ break;
+ }
+ }
+ // Put the remaining parts into the output parts.
+ output_parts.append(&mut parts);
+
+ output_parts
}
#[cfg(test)]
-impl SeqScan {
- /// Returns the input.
- pub(crate) fn input(&self) -> &ScanInput {
- &self.input
+mod tests {
+ use std::sync::Arc;
+
+ use common_time::timestamp::TimeUnit;
+ use common_time::Timestamp;
+
+ use super::*;
+ use crate::memtable::MemtableId;
+ use crate::test_util::memtable_util::EmptyMemtable;
+
+ type Output = (Vec<MemtableId>, i64, i64);
+
+ fn run_group_parts_test(input: &[(MemtableId, i64, i64)], expect: &[Output]) {
+ let parts = input
+ .iter()
+ .map(|(id, start, end)| {
+ let range = (
+ Timestamp::new(*start, TimeUnit::Second),
+ Timestamp::new(*end, TimeUnit::Second),
+ );
+ ScanPart {
+ memtables: vec![Arc::new(
+ EmptyMemtable::new(*id).with_time_range(Some(range)),
+ )],
+ file_ranges: smallvec![],
+ time_range: Some(range),
+ }
+ })
+ .collect();
+ let output = group_parts_by_range(parts);
+ let actual: Vec<_> = output
+ .iter()
+ .map(|part| {
+ let ids: Vec<_> = part.memtables.iter().map(|mem| mem.id()).collect();
+ let range = part.time_range.unwrap();
+ (ids, range.0.value(), range.1.value())
+ })
+ .collect();
+ assert_eq!(expect, actual);
+ }
+
+ #[test]
+ fn test_group_parts() {
+ // Group 1 part.
+ run_group_parts_test(&[(1, 0, 2000)], &[(vec![1], 0, 2000)]);
+
+ // 1, 2, 3, 4 => [3, 1, 4], [2]
+ run_group_parts_test(
+ &[
+ (1, 1000, 2000),
+ (2, 6000, 7000),
+ (3, 0, 1500),
+ (4, 1500, 3000),
+ ],
+ &[(vec![3, 1, 4], 0, 3000), (vec![2], 6000, 7000)],
+ );
+
+ // 1, 2, 3 => [3], [1], [2],
+ run_group_parts_test(
+ &[(1, 3000, 4000), (2, 4001, 6000), (3, 0, 1000)],
+ &[
+ (vec![3], 0, 1000),
+ (vec![1], 3000, 4000),
+ (vec![2], 4001, 6000),
+ ],
+ );
}
}
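
Aside from the patch itself, the grouping step in `group_parts_by_range` is essentially an interval merge over inclusive time ranges: sort by start (ties broken by larger end first), then fold overlapping neighbors into one group. The standalone sketch below reproduces the same comparator and merge condition on plain `(start, end)` pairs; the function name `group_ranges` and the `i64` endpoints are illustrative simplifications, not code from the repository.

fn group_ranges(mut ranges: Vec<(i64, i64)>) -> Vec<(i64, i64)> {
    // Sort by start ascending, then by end descending, mirroring the comparator in the patch.
    ranges.sort_unstable_by(|a, b| a.0.cmp(&b.0).then_with(|| b.1.cmp(&a.1)));
    let mut groups: Vec<(i64, i64)> = Vec::new();
    for (start, end) in ranges {
        if let Some(last) = groups.last_mut() {
            // Inclusive ranges overlap when the next start is <= the current group's end.
            if start <= last.1 {
                last.1 = last.1.max(end);
                continue;
            }
        }
        groups.push((start, end));
    }
    groups
}

fn main() {
    // Mirrors the expectation in `test_group_parts`: 0..1500, 1000..2000 and 1500..3000
    // collapse into one group, while 6000..7000 stays on its own.
    let grouped = group_ranges(vec![(1000, 2000), (6000, 7000), (0, 1500), (1500, 3000)]);
    assert_eq!(grouped, vec![(0, 3000), (6000, 7000)]);
}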
diff --git a/src/mito2/src/read/unordered_scan.rs b/src/mito2/src/read/unordered_scan.rs
index 48f682969d32..eccd8ec88c79 100644
--- a/src/mito2/src/read/unordered_scan.rs
+++ b/src/mito2/src/read/unordered_scan.rs
@@ -16,7 +16,7 @@
use std::fmt;
use std::sync::Arc;
-use std::time::{Duration, Instant};
+use std::time::Instant;
use async_stream::{stream, try_stream};
use common_error::ext::BoxedError;
@@ -26,18 +26,19 @@ use common_telemetry::debug;
use datafusion::physical_plan::{DisplayAs, DisplayFormatType};
use datatypes::schema::SchemaRef;
use futures::StreamExt;
+use smallvec::smallvec;
use snafu::ResultExt;
use store_api::region_engine::{RegionScanner, ScannerPartitioning, ScannerProperties};
-use tokio::sync::Mutex;
use crate::cache::CacheManager;
use crate::error::Result;
use crate::memtable::MemtableRef;
-use crate::metrics::{READ_BATCHES_RETURN, READ_ROWS_RETURN, READ_STAGE_ELAPSED};
use crate::read::compat::CompatBatch;
use crate::read::projection::ProjectionMapper;
-use crate::read::scan_region::{FileRangeCollector, ScanInput, ScanPart};
-use crate::read::Source;
+use crate::read::scan_region::{
+ FileRangeCollector, ScanInput, ScanPart, ScanPartList, StreamContext,
+};
+use crate::read::{ScannerMetrics, Source};
use crate::sst::file::FileMeta;
use crate::sst::parquet::file_range::FileRange;
use crate::sst::parquet::reader::ReaderMetrics;
@@ -55,22 +56,10 @@ pub struct UnorderedScan {
impl UnorderedScan {
/// Creates a new [UnorderedScan].
pub(crate) fn new(input: ScanInput) -> Self {
- let query_start = input.query_start.unwrap_or_else(Instant::now);
- let prepare_scan_cost = query_start.elapsed();
- let properties =
- ScannerProperties::new(ScannerPartitioning::Unknown(input.parallelism.parallelism));
-
- // Observes metrics.
- READ_STAGE_ELAPSED
- .with_label_values(&["prepare_scan"])
- .observe(prepare_scan_cost.as_secs_f64());
-
- let stream_ctx = Arc::new(StreamContext {
- input,
- parts: Mutex::new(ScanPartList::default()),
- query_start,
- prepare_scan_cost,
- });
+ let properties = ScannerProperties::new(ScannerPartitioning::Unknown(
+ input.parallelism.parallelism.max(1),
+ ));
+ let stream_ctx = Arc::new(StreamContext::new(input));
Self {
properties,
@@ -104,7 +93,7 @@ impl UnorderedScan {
mapper: &ProjectionMapper,
cache: Option<&CacheManager>,
compat_batch: Option<&CompatBatch>,
- metrics: &mut Metrics,
+ metrics: &mut ScannerMetrics,
) -> common_recordbatch::error::Result<Option<RecordBatch>> {
let start = Instant::now();
@@ -133,20 +122,6 @@ impl UnorderedScan {
Ok(Some(record_batch))
}
-
- fn observe_metrics_on_finish(metrics: &Metrics) {
- READ_STAGE_ELAPSED
- .with_label_values(&["convert_rb"])
- .observe(metrics.convert_cost.as_secs_f64());
- READ_STAGE_ELAPSED
- .with_label_values(&["scan"])
- .observe(metrics.scan_cost.as_secs_f64());
- READ_STAGE_ELAPSED
- .with_label_values(&["total"])
- .observe(metrics.total_cost.as_secs_f64());
- READ_ROWS_RETURN.observe(metrics.num_rows as f64);
- READ_BATCHES_RETURN.observe(metrics.num_batches as f64);
- }
}
impl RegionScanner for UnorderedScan {
@@ -159,15 +134,14 @@ impl RegionScanner for UnorderedScan {
}
fn scan_partition(&self, partition: usize) -> Result<SendableRecordBatchStream, BoxedError> {
- let mut metrics = Metrics {
+ let mut metrics = ScannerMetrics {
prepare_scan_cost: self.stream_ctx.prepare_scan_cost,
..Default::default()
};
let stream_ctx = self.stream_ctx.clone();
let stream = try_stream! {
let mut parts = stream_ctx.parts.lock().await;
- parts
- .maybe_init_parts(&stream_ctx.input, &mut metrics)
+ maybe_init_parts(&mut parts, &stream_ctx.input, &mut metrics)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
@@ -201,7 +175,8 @@ impl RegionScanner for UnorderedScan {
}
// Then scans file ranges.
let mut reader_metrics = ReaderMetrics::default();
- for file_range in &part.file_ranges {
+ // Safety: UnorderedDistributor::build_parts() ensures this.
+ for file_range in &part.file_ranges[0] {
let reader = file_range.reader().await.map_err(BoxedError::new).context(ExternalSnafu)?;
let compat_batch = file_range.compat_batch();
let mut source = Source::RowGroupReader(reader);
@@ -216,7 +191,7 @@ impl RegionScanner for UnorderedScan {
}
metrics.total_cost = query_start.elapsed();
- Self::observe_metrics_on_finish(&metrics);
+ metrics.observe_metrics_on_finish();
debug!(
"Unordered scan partition {} finished, region_id: {}, metrics: {:?}, reader_metrics: {:?}",
partition, mapper.metadata().region_id, metrics, reader_metrics
@@ -232,8 +207,9 @@ impl RegionScanner for UnorderedScan {
}
impl DisplayAs for UnorderedScan {
- fn fmt_as(&self, _t: DisplayFormatType, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "UnorderedScan: [{:?}]", self.stream_ctx.parts)
+ fn fmt_as(&self, t: DisplayFormatType, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "UnorderedScan: ")?;
+ self.stream_ctx.format_parts(t, f)
}
}
@@ -254,73 +230,22 @@ impl UnorderedScan {
}
}
-/// List of [ScanPart]s.
-#[derive(Debug, Default)]
-struct ScanPartList(Option<Vec<ScanPart>>);
-
-impl ScanPartList {
- /// Initializes parts if they are not built yet.
- async fn maybe_init_parts(&mut self, input: &ScanInput, metrics: &mut Metrics) -> Result<()> {
- if self.0.is_none() {
- let now = Instant::now();
- let mut distributor = UnorderedDistributor::default();
- input.prune_file_ranges(&mut distributor).await?;
- self.0 = Some(distributor.build_parts(&input.memtables, input.parallelism.parallelism));
-
- metrics.build_parts_cost = now.elapsed();
- READ_STAGE_ELAPSED
- .with_label_values(&["build_parts"])
- .observe(metrics.build_parts_cost.as_secs_f64());
- }
- Ok(())
+/// Initializes parts if they are not built yet.
+async fn maybe_init_parts(
+ part_list: &mut ScanPartList,
+ input: &ScanInput,
+ metrics: &mut ScannerMetrics,
+) -> Result<()> {
+ if part_list.is_none() {
+ let now = Instant::now();
+ let mut distributor = UnorderedDistributor::default();
+ input.prune_file_ranges(&mut distributor).await?;
+ part_list
+ .set_parts(distributor.build_parts(&input.memtables, input.parallelism.parallelism));
+
+ metrics.observe_init_part(now.elapsed());
}
-
- /// Gets the part by index, returns None if the index is out of bound.
- /// # Panics
- /// Panics if parts are not initialized.
- fn get_part(&mut self, index: usize) -> Option<&ScanPart> {
- let parts = self.0.as_ref().unwrap();
- parts.get(index)
- }
-}
-
-/// Context shared by different streams.
-/// It contains the input and distributes input to multiple parts
-/// to scan.
-struct StreamContext {
- /// Input memtables and files.
- input: ScanInput,
- /// Parts to scan.
- /// The scanner builds parts to scan from the input lazily.
- /// The mutex is used to ensure the parts are only built once.
- parts: Mutex<ScanPartList>,
-
- // Metrics:
- /// The start time of the query.
- query_start: Instant,
- /// Time elapsed before creating the scanner.
- prepare_scan_cost: Duration,
-}
-
-/// Metrics for [UnorderedScan].
-// We print all fields in logs so we disable the dead_code lint.
-#[allow(dead_code)]
-#[derive(Debug, Default)]
-struct Metrics {
- /// Duration to prepare the scan task.
- prepare_scan_cost: Duration,
- /// Duration to build parts.
- build_parts_cost: Duration,
- /// Duration to scan data.
- scan_cost: Duration,
- /// Duration to convert batches.
- convert_cost: Duration,
- /// Duration of the scan.
- total_cost: Duration,
- /// Number of batches returned.
- num_batches: usize,
- /// Number of rows returned.
- num_rows: usize,
+ Ok(())
}
/// Builds [ScanPart]s without preserving order. It distributes file ranges and memtables
@@ -344,12 +269,15 @@ impl FileRangeCollector for UnorderedDistributor {
impl UnorderedDistributor {
/// Distributes file ranges and memtables across partitions according to the `parallelism`.
/// The output number of parts may be `<= parallelism`.
+ ///
+ /// A [ScanPart] created by this distributor contains only one group of file ranges.
fn build_parts(self, memtables: &[MemtableRef], parallelism: usize) -> Vec<ScanPart> {
if parallelism <= 1 {
// Returns a single part.
let part = ScanPart {
memtables: memtables.to_vec(),
- file_ranges: self.file_ranges,
+ file_ranges: smallvec![self.file_ranges],
+ time_range: None,
};
return vec![part];
}
@@ -368,17 +296,19 @@ impl UnorderedDistributor {
.chunks(mems_per_part)
.map(|mems| ScanPart {
memtables: mems.to_vec(),
- file_ranges: Vec::new(),
+ file_ranges: smallvec![Vec::new()], // Ensures there is always one group.
+ time_range: None,
})
.collect::<Vec<_>>();
for (i, ranges) in self.file_ranges.chunks(ranges_per_part).enumerate() {
if i == scan_parts.len() {
scan_parts.push(ScanPart {
memtables: Vec::new(),
- file_ranges: ranges.to_vec(),
+ file_ranges: smallvec![ranges.to_vec()],
+ time_range: None,
});
} else {
- scan_parts[i].file_ranges = ranges.to_vec();
+ scan_parts[i].file_ranges = smallvec![ranges.to_vec()];
}
}
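
A note on the unordered distributor above: the hunk shows how memtables and file ranges are chunked into `scan_parts`, but not how `mems_per_part` and `ranges_per_part` are computed. As a rough, hypothetical illustration of the underlying idea, splitting N items into at most `parallelism` chunks via ceiling division, here is a standalone sketch; the helper name `split_into_parts` is an assumption and does not come from the codebase.

// Hypothetical helper: split `items` into at most `parallelism` chunks of roughly equal
// size using ceiling division. Illustration only, not code from the patch above.
fn split_into_parts<T: Clone>(items: &[T], parallelism: usize) -> Vec<Vec<T>> {
    let parallelism = parallelism.max(1);
    if items.is_empty() {
        return Vec::new();
    }
    // Ceiling division keeps the number of chunks at or below `parallelism`.
    let per_part = (items.len() + parallelism - 1) / parallelism;
    items.chunks(per_part).map(|c| c.to_vec()).collect()
}

fn main() {
    let parts = split_into_parts(&[1, 2, 3, 4, 5, 6, 7], 3);
    assert_eq!(parts, vec![vec![1, 2, 3], vec![4, 5, 6], vec![7]]);
}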
diff --git a/src/mito2/src/sst/file.rs b/src/mito2/src/sst/file.rs
index f3587dd24a64..f095859831e2 100644
--- a/src/mito2/src/sst/file.rs
+++ b/src/mito2/src/sst/file.rs
@@ -82,6 +82,15 @@ impl FromStr for FileId {
/// Time range of a SST file.
pub type FileTimeRange = (Timestamp, Timestamp);
+/// Checks if two inclusive timestamp ranges overlap with each other.
+pub(crate) fn overlaps(l: &FileTimeRange, r: &FileTimeRange) -> bool {
+ let (l, r) = if l.0 <= r.0 { (l, r) } else { (r, l) };
+ let (_, l_end) = l;
+ let (r_start, _) = r;
+
+ r_start <= l_end
+}
+
/// Metadata of a SST file.
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, Default)]
#[serde(default)]
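
The `overlaps` helper added to `sst/file.rs` above normalizes the two inclusive ranges so that the one starting earlier comes first, after which a single comparison of the later start against the earlier end decides the result. A minimal standalone check of the same logic on plain integer pairs (illustrative only, not using the real `FileTimeRange` type):

// Same logic as the patch's `overlaps`, on plain (start, end) pairs for illustration.
fn overlaps(l: (i64, i64), r: (i64, i64)) -> bool {
    let (l, r) = if l.0 <= r.0 { (l, r) } else { (r, l) };
    r.0 <= l.1
}

fn main() {
    assert!(overlaps((0, 10), (10, 20)));  // touching endpoints overlap (ranges are inclusive)
    assert!(overlaps((5, 8), (0, 20)));    // containment
    assert!(!overlaps((0, 10), (11, 20))); // disjoint
    assert!(!overlaps((11, 20), (0, 10))); // argument order doesn't matter
}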
diff --git a/src/mito2/src/sst/parquet/file_range.rs b/src/mito2/src/sst/parquet/file_range.rs
index 5a3b074dede8..7723a996c062 100644
--- a/src/mito2/src/sst/parquet/file_range.rs
+++ b/src/mito2/src/sst/parquet/file_range.rs
@@ -28,6 +28,7 @@ use crate::error::{FieldTypeMismatchSnafu, FilterRecordBatchSnafu, Result};
use crate::read::compat::CompatBatch;
use crate::read::Batch;
use crate::row_converter::{McmpRowCodec, RowCodec};
+use crate::sst::file::FileHandle;
use crate::sst::parquet::format::ReadFormat;
use crate::sst::parquet::reader::{RowGroupReader, RowGroupReaderBuilder, SimpleFilterContext};
@@ -72,6 +73,11 @@ impl FileRange {
pub(crate) fn compat_batch(&self) -> Option<&CompatBatch> {
self.context.compat_batch()
}
+
+ /// Returns the file handle of the file range.
+ pub(crate) fn file_handle(&self) -> &FileHandle {
+ self.context.reader_builder.file_handle()
+ }
}
/// Context shared by ranges of the same parquet SST.
diff --git a/src/mito2/src/sst/parquet/reader.rs b/src/mito2/src/sst/parquet/reader.rs
index 59f68bf1b279..db2eb5b9cf8d 100644
--- a/src/mito2/src/sst/parquet/reader.rs
+++ b/src/mito2/src/sst/parquet/reader.rs
@@ -62,7 +62,7 @@ use crate::sst::parquet::stats::RowGroupPruningStats;
use crate::sst::parquet::{DEFAULT_READ_BATCH_SIZE, PARQUET_METADATA_KEY};
/// Parquet SST reader builder.
-pub(crate) struct ParquetReaderBuilder {
+pub struct ParquetReaderBuilder {
/// SST directory.
file_dir: String,
file_handle: FileHandle,
@@ -138,7 +138,7 @@ impl ParquetReaderBuilder {
/// Attaches the index applier to the builder.
#[must_use]
- pub fn index_applier(mut self, index_applier: Option<SstIndexApplierRef>) -> Self {
+ pub(crate) fn index_applier(mut self, index_applier: Option<SstIndexApplierRef>) -> Self {
self.index_applier = index_applier;
self
}
@@ -570,6 +570,11 @@ impl RowGroupReaderBuilder {
&self.file_path
}
+ /// Handle of the file to read.
+ pub(crate) fn file_handle(&self) -> &FileHandle {
+ &self.file_handle
+ }
+
/// Builds a [ParquetRecordBatchReader] to read the row group at `row_group_idx`.
pub(crate) async fn build(
&self,
diff --git a/src/mito2/src/test_util/memtable_util.rs b/src/mito2/src/test_util/memtable_util.rs
index b2764ba88957..b3d1898c5bc6 100644
--- a/src/mito2/src/test_util/memtable_util.rs
+++ b/src/mito2/src/test_util/memtable_util.rs
@@ -19,6 +19,7 @@ use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
use api::v1::value::ValueData;
use api::v1::{Row, Rows, SemanticType};
+use common_time::Timestamp;
use datatypes::arrow::array::UInt64Array;
use datatypes::data_type::ConcreteDataType;
use datatypes::scalars::ScalarVector;
@@ -42,12 +43,26 @@ use crate::row_converter::{McmpRowCodec, RowCodec, SortField};
pub(crate) struct EmptyMemtable {
/// Id of this memtable.
id: MemtableId,
+ /// Time range to return.
+ time_range: Option<(Timestamp, Timestamp)>,
}
impl EmptyMemtable {
/// Returns a new memtable with specific `id`.
pub(crate) fn new(id: MemtableId) -> EmptyMemtable {
- EmptyMemtable { id }
+ EmptyMemtable {
+ id,
+ time_range: None,
+ }
+ }
+
+ /// Attaches the time range to the memtable.
+ pub(crate) fn with_time_range(
+ mut self,
+ time_range: Option<(Timestamp, Timestamp)>,
+ ) -> EmptyMemtable {
+ self.time_range = time_range;
+ self
}
}
@@ -81,7 +96,7 @@ impl Memtable for EmptyMemtable {
}
fn stats(&self) -> MemtableStats {
- MemtableStats::default()
+ MemtableStats::default().with_time_range(self.time_range)
}
fn fork(&self, id: MemtableId, _metadata: &RegionMetadataRef) -> MemtableRef {
diff --git a/src/table/src/table/scan.rs b/src/table/src/table/scan.rs
index a68e0221ce69..df71efeaeab6 100644
--- a/src/table/src/table/scan.rs
+++ b/src/table/src/table/scan.rs
@@ -50,9 +50,16 @@ impl RegionScanExec {
pub fn new(scanner: RegionScannerRef) -> Self {
let arrow_schema = scanner.schema().arrow_schema().clone();
let scanner_props = scanner.properties();
+ let mut num_output_partition = scanner_props.partitioning().num_partitions();
+ // The meaning of the word "partition" differs by context: for DataFusion it is about
+ // "parallelism", while for storage it is about "data ranges". Thus we add a special
+ // case here to handle the situation where the number of storage partitions is 0.
+ if num_output_partition == 0 {
+ num_output_partition = 1;
+ }
let properties = PlanProperties::new(
EquivalenceProperties::new(arrow_schema.clone()),
- Partitioning::UnknownPartitioning(scanner_props.partitioning().num_partitions()),
+ Partitioning::UnknownPartitioning(num_output_partition),
ExecutionMode::Bounded,
);
Self {
@@ -122,9 +129,9 @@ impl ExecutionPlan for RegionScanExec {
}
impl DisplayAs for RegionScanExec {
- fn fmt_as(&self, _t: DisplayFormatType, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+ fn fmt_as(&self, t: DisplayFormatType, f: &mut std::fmt::Formatter) -> std::fmt::Result {
// The scanner contains all information needed to display the plan.
- write!(f, "{:?}", self.scanner)
+ self.scanner.fmt_as(t, f)
}
}
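
The `RegionScanExec` change above simply clamps the reported partition count to at least one, so DataFusion always receives a valid output partitioning even when the storage layer reports zero data ranges. The same clamp can be written with `max`; a trivial sketch with a hypothetical helper name:

// Equivalent clamp expressed with `max`; the patch spells it out with an `if` for clarity.
fn output_partitions(storage_partitions: usize) -> usize {
    storage_partitions.max(1)
}

fn main() {
    assert_eq!(output_partitions(0), 1);
    assert_eq!(output_partitions(4), 4);
}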
diff --git a/tests-integration/src/grpc.rs b/tests-integration/src/grpc.rs
index b3e26b9bb5cb..5975138431ef 100644
--- a/tests-integration/src/grpc.rs
+++ b/tests-integration/src/grpc.rs
@@ -652,7 +652,7 @@ CREATE TABLE {table_name} (
let request = Request::Query(QueryRequest {
query: Some(Query::Sql(
- "SELECT ts, a, b FROM auto_created_table".to_string(),
+ "SELECT ts, a, b FROM auto_created_table order by ts".to_string(),
)),
});
let output = query(instance, request.clone()).await;
diff --git a/tests/cases/distributed/explain/analyze.result b/tests/cases/distributed/explain/analyze.result
index 9adcf9eb212c..26f55d9a2470 100644
--- a/tests/cases/distributed/explain/analyze.result
+++ b/tests/cases/distributed/explain/analyze.result
@@ -35,7 +35,7 @@ explain analyze SELECT count(*) FROM system_metrics;
|_|_|_CoalescePartitionsExec REDACTED
|_|_|_AggregateExec: mode=Partial, gby=[], aggr=[COUNT(system_REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
-|_|_|_SinglePartitionScanner: <SendableRecordBatchStream> REDACTED
+|_|_|_SeqScan: partition_count=1 (1 memtables, 0 file ranges) REDACTED
|_|_|_|
|_|_| Total rows: 1_|
+-+-+-+
diff --git a/tests/cases/standalone/common/alter/alter_table_default.result b/tests/cases/standalone/common/alter/alter_table_default.result
index 95d0358c8755..40801bd2e7d7 100644
--- a/tests/cases/standalone/common/alter/alter_table_default.result
+++ b/tests/cases/standalone/common/alter/alter_table_default.result
@@ -25,6 +25,7 @@ INSERT INTO test1 values (3, 3, DEFAULT), (4, 4, '2024-01-31 00:01:01');
Affected Rows: 2
+-- SQLNESS SORT_RESULT 3 1
SELECT i, ts1 FROM test1;
+---+---------------------+
@@ -49,6 +50,7 @@ INSERT INTO test1 values (5, 5, DEFAULT, DEFAULT), (6, 6, DEFAULT, '2024-01-31 0
Affected Rows: 2
+-- SQLNESS SORT_RESULT 3 1
SELECT i, ts1, ts2 FROM test1;
+---+---------------------+---------------------+
diff --git a/tests/cases/standalone/common/alter/alter_table_default.sql b/tests/cases/standalone/common/alter/alter_table_default.sql
index 3169963b7a44..8789b2da571a 100644
--- a/tests/cases/standalone/common/alter/alter_table_default.sql
+++ b/tests/cases/standalone/common/alter/alter_table_default.sql
@@ -11,6 +11,7 @@ ALTER TABLE test1 ADD COLUMN ts1 TIMESTAMP DEFAULT '2024-01-30 00:01:01' PRIMARY
INSERT INTO test1 values (3, 3, DEFAULT), (4, 4, '2024-01-31 00:01:01');
+-- SQLNESS SORT_RESULT 3 1
SELECT i, ts1 FROM test1;
SET time_zone = 'Asia/Shanghai';
@@ -20,6 +21,7 @@ ALTER TABLE test1 ADD COLUMN ts2 TIMESTAMP DEFAULT '2024-01-30 00:01:01' PRIMARY
INSERT INTO test1 values (5, 5, DEFAULT, DEFAULT), (6, 6, DEFAULT, '2024-01-31 00:01:01');
+-- SQLNESS SORT_RESULT 3 1
SELECT i, ts1, ts2 FROM test1;
SET time_zone = 'UTC';
diff --git a/tests/cases/standalone/common/alter/alter_table_first_after.result b/tests/cases/standalone/common/alter/alter_table_first_after.result
index ba7e0ade6c4c..ff13ec388d1c 100644
--- a/tests/cases/standalone/common/alter/alter_table_first_after.result
+++ b/tests/cases/standalone/common/alter/alter_table_first_after.result
@@ -108,6 +108,7 @@ DESC TABLE t;
| m | Int32 | | YES | | FIELD |
+--------+----------------------+-----+------+---------+---------------+
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM t;
+---+---+-------------------------+---+---+---+
@@ -155,6 +156,7 @@ DESC TABLE t;
| m | Int32 | | YES | | FIELD |
+--------+----------------------+-----+------+---------+---------------+
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM t;
+---+---+---+-------------------------+---+---+---+---+
diff --git a/tests/cases/standalone/common/alter/alter_table_first_after.sql b/tests/cases/standalone/common/alter/alter_table_first_after.sql
index a2106230baca..0713d660bdb5 100644
--- a/tests/cases/standalone/common/alter/alter_table_first_after.sql
+++ b/tests/cases/standalone/common/alter/alter_table_first_after.sql
@@ -29,6 +29,7 @@ ALTER TABLE t ADD COLUMN y INTEGER AFTER j;
DESC TABLE t;
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM t;
-- SQLNESS ARG restart=true
@@ -40,6 +41,7 @@ ALTER TABLE t ADD COLUMN b INTEGER AFTER j;
DESC TABLE t;
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM t;
ALTER TABLE t ADD COLUMN x int xxx;
diff --git a/tests/cases/standalone/common/alter/change_col_type.result b/tests/cases/standalone/common/alter/change_col_type.result
index 3d9500105a6d..f0a641d28bc2 100644
--- a/tests/cases/standalone/common/alter/change_col_type.result
+++ b/tests/cases/standalone/common/alter/change_col_type.result
@@ -39,6 +39,7 @@ INSERT INTO test VALUES (3, "greptime", 3, true);
Affected Rows: 1
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM test;
+----+----------+-------------------------+-------+
@@ -64,6 +65,7 @@ ALTER TABLE test MODIFY I INTEGER;
Affected Rows: 0
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM test;
+----+---+-------------------------+-------+
diff --git a/tests/cases/standalone/common/alter/change_col_type.sql b/tests/cases/standalone/common/alter/change_col_type.sql
index 1eb95c719cdc..0fe8c28e9b90 100644
--- a/tests/cases/standalone/common/alter/change_col_type.sql
+++ b/tests/cases/standalone/common/alter/change_col_type.sql
@@ -16,12 +16,14 @@ SELECT * FROM test;
INSERT INTO test VALUES (3, "greptime", 3, true);
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM test;
DESCRIBE test;
ALTER TABLE test MODIFY I INTEGER;
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM test;
DESCRIBE test;
diff --git a/tests/cases/standalone/common/alter/drop_col_not_null.result b/tests/cases/standalone/common/alter/drop_col_not_null.result
index 86a6f3150eeb..783220c79e45 100644
--- a/tests/cases/standalone/common/alter/drop_col_not_null.result
+++ b/tests/cases/standalone/common/alter/drop_col_not_null.result
@@ -23,6 +23,7 @@ INSERT INTO test VALUES (3);
Affected Rows: 1
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM test;
+-------------------------+
diff --git a/tests/cases/standalone/common/alter/drop_col_not_null.sql b/tests/cases/standalone/common/alter/drop_col_not_null.sql
index ff98f350d3fe..b7700d830920 100644
--- a/tests/cases/standalone/common/alter/drop_col_not_null.sql
+++ b/tests/cases/standalone/common/alter/drop_col_not_null.sql
@@ -8,6 +8,7 @@ ALTER TABLE test DROP COLUMN j;
INSERT INTO test VALUES (3);
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM test;
DROP TABLE test;
diff --git a/tests/cases/standalone/common/alter/drop_col_not_null_next.result b/tests/cases/standalone/common/alter/drop_col_not_null_next.result
index 049aa8d0df5b..74952f49b35b 100644
--- a/tests/cases/standalone/common/alter/drop_col_not_null_next.result
+++ b/tests/cases/standalone/common/alter/drop_col_not_null_next.result
@@ -28,6 +28,7 @@ INSERT INTO test VALUES (3, 13);
Affected Rows: 1
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM test;
+-------------------------+----+
diff --git a/tests/cases/standalone/common/alter/drop_col_not_null_next.sql b/tests/cases/standalone/common/alter/drop_col_not_null_next.sql
index 84850539e74c..bdd8274d9f16 100644
--- a/tests/cases/standalone/common/alter/drop_col_not_null_next.sql
+++ b/tests/cases/standalone/common/alter/drop_col_not_null_next.sql
@@ -11,6 +11,7 @@ INSERT INTO test VALUES (3, NULL);
INSERT INTO test VALUES (3, 13);
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM test;
DROP TABLE test;
diff --git a/tests/cases/standalone/common/range/nest.result b/tests/cases/standalone/common/range/nest.result
index 5ccc155b05fe..59b12671aea8 100644
--- a/tests/cases/standalone/common/range/nest.result
+++ b/tests/cases/standalone/common/range/nest.result
@@ -74,7 +74,7 @@ EXPLAIN ANALYZE SELECT ts, host, min(val) RANGE '5s' FROM host ALIGN '5s';
| 0_| 0_|_RangeSelectExec: range_expr=[MIN(host.val) RANGE 5s], align=5000ms, align_to=0ms, align_by=[host@1], time_index=ts REDACTED
|_|_|_MergeScanExec: REDACTED
|_|_|_|
-| 1_| 0_|_SinglePartitionScanner: <SendableRecordBatchStream> REDACTED
+| 1_| 0_|_SeqScan: partition_count=1 (1 memtables, 0 file ranges) REDACTED
|_|_|_|
|_|_| Total rows: 10_|
+-+-+-+
diff --git a/tests/cases/standalone/common/tql-explain-analyze/analyze.result b/tests/cases/standalone/common/tql-explain-analyze/analyze.result
index e3bbc84e4257..8fb7eb2144f0 100644
--- a/tests/cases/standalone/common/tql-explain-analyze/analyze.result
+++ b/tests/cases/standalone/common/tql-explain-analyze/analyze.result
@@ -30,7 +30,7 @@ TQL ANALYZE (0, 10, '5s') test;
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: j@1 >= -300000 AND j@1 <= 310000 REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
-|_|_|_SinglePartitionScanner: <SendableRecordBatchStream> REDACTED
+|_|_|_SeqScan: partition_count=1 (1 memtables, 0 file ranges) REDACTED
|_|_|_|
|_|_| Total rows: 4_|
+-+-+-+
@@ -59,7 +59,7 @@ TQL ANALYZE (0, 10, '1s', '2s') test;
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: j@1 >= -2000 AND j@1 <= 12000 REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
-|_|_|_SinglePartitionScanner: <SendableRecordBatchStream> REDACTED
+|_|_|_SeqScan: partition_count=1 (1 memtables, 0 file ranges) REDACTED
|_|_|_|
|_|_| Total rows: 4_|
+-+-+-+
@@ -87,7 +87,7 @@ TQL ANALYZE ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: j@1 >= -300000 AND j@1 <= 310000 REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
-|_|_|_SinglePartitionScanner: <SendableRecordBatchStream> REDACTED
+|_|_|_SeqScan: partition_count=1 (1 memtables, 0 file ranges) REDACTED
|_|_|_|
|_|_| Total rows: 4_|
+-+-+-+
@@ -117,7 +117,7 @@ TQL ANALYZE VERBOSE (0, 10, '5s') test;
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: j@1 >= -300000 AND j@1 <= 310000 REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
-|_|_|_SinglePartitionScanner: <SendableRecordBatchStream> REDACTED
+|_|_|_SeqScan: partition_count=1 (1 memtables, 0 file ranges) REDACTED
|_|_|_|
|_|_| Total rows: 4_|
+-+-+-+
|
feat
|
Implement RegionScanner for SeqScan (#4060)
|
eab9e3a48dd4d5a3bbb6ec4223eac516ac5a9a4c
|
2024-10-28 14:20:10
|
Lei, HUANG
|
chore: remove struct size assertion (#4885)
| false
|
diff --git a/src/mito2/src/cache/cache_size.rs b/src/mito2/src/cache/cache_size.rs
index 3d79bcbe5801..a60b203bd758 100644
--- a/src/mito2/src/cache/cache_size.rs
+++ b/src/mito2/src/cache/cache_size.rs
@@ -127,16 +127,3 @@ fn parquet_offset_index_heap_size(offset_index: &ParquetOffsetIndex) -> usize {
})
.sum()
}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use crate::cache::test_util::parquet_meta;
-
- #[test]
- fn test_parquet_meta_size() {
- let metadata = parquet_meta();
-
- assert_eq!(956, parquet_meta_size(&metadata));
- }
-}
|
chore
|
remove struct size assertion (#4885)
|
9bf9aa10822a421a7507cc70470a10bafaf1bb25
|
2025-01-03 21:11:00
|
Yingwen
|
chore: update greptime-proto to include add_if_not_exists (#5289)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 51d9c78218d5..7530aa0f4da5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4593,7 +4593,7 @@ dependencies = [
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=7c01e4a8e64580707438dabc5cf7f4e2584c28b6#7c01e4a8e64580707438dabc5cf7f4e2584c28b6"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=43ddd8dea69f4df0fe2e8b5cdc0044d2cfa35908#43ddd8dea69f4df0fe2e8b5cdc0044d2cfa35908"
dependencies = [
"prost 0.12.6",
"serde",
diff --git a/Cargo.toml b/Cargo.toml
index 2320ebaeafa2..73571315fe34 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -126,7 +126,7 @@ etcd-client = "0.13"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "7c01e4a8e64580707438dabc5cf7f4e2584c28b6" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "43ddd8dea69f4df0fe2e8b5cdc0044d2cfa35908" }
hex = "0.4"
http = "0.2"
humantime = "2.1"
|
chore
|
update greptime-proto to include add_if_not_exists (#5289)
|
0d9e71b6533b7124c0c446728d31073fe1477ab5
|
2024-05-16 17:07:14
|
discord9
|
feat(flow): flow node manager (#3954)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 37fd88cb4613..3d572aa3e832 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1629,6 +1629,7 @@ dependencies = [
"either",
"etcd-client",
"file-engine",
+ "flow",
"frontend",
"futures",
"human-panic",
@@ -10683,6 +10684,7 @@ dependencies = [
"datanode",
"datatypes",
"dotenv",
+ "flow",
"frontend",
"futures",
"futures-util",
diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml
index 7052df92244e..a11a9a01974a 100644
--- a/src/cmd/Cargo.toml
+++ b/src/cmd/Cargo.toml
@@ -45,6 +45,7 @@ datatypes.workspace = true
either = "1.8"
etcd-client.workspace = true
file-engine.workspace = true
+flow.workspace = true
frontend.workspace = true
futures.workspace = true
human-panic = "1.2.2"
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index ff810ead5d41..21901e9b967a 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -45,6 +45,7 @@ use common_wal::config::StandaloneWalConfig;
use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
use datanode::datanode::{Datanode, DatanodeBuilder};
use file_engine::config::EngineConfig as FileEngineConfig;
+use flow::FlownodeBuilder;
use frontend::frontend::FrontendOptions;
use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
@@ -426,11 +427,26 @@ impl StartCommand {
)
.await;
+ let table_metadata_manager =
+ Self::create_table_metadata_manager(kv_backend.clone()).await?;
+
+ let flow_builder = FlownodeBuilder::new(
+ 1,
+ Default::default(),
+ fe_plugins.clone(),
+ table_metadata_manager.clone(),
+ catalog_manager.clone(),
+ );
+ let flownode = Arc::new(flow_builder.build().await);
+
let builder =
DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
let datanode = builder.build().await.context(StartDatanodeSnafu)?;
- let node_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));
+ let node_manager = Arc::new(StandaloneDatanodeManager {
+ region_server: datanode.region_server(),
+ flow_server: flownode.clone(),
+ });
let table_id_sequence = Arc::new(
SequenceBuilder::new(TABLE_ID_SEQ, kv_backend.clone())
@@ -448,8 +464,6 @@ impl StartCommand {
opts.wal.into(),
kv_backend.clone(),
));
- let table_metadata_manager =
- Self::create_table_metadata_manager(kv_backend.clone()).await?;
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
table_id_sequence,
@@ -482,6 +496,13 @@ impl StartCommand {
.await
.context(StartFrontendSnafu)?;
+ // flow server need to be able to use frontend to write insert requests back
+ flownode
+ .set_frontend_invoker(Box::new(frontend.clone()))
+ .await;
+ // TODO(discord9): unify with adding `start` and `shutdown` method to flownode too.
+ let _handle = flownode.clone().run_background();
+
let servers = Services::new(fe_opts.clone(), Arc::new(frontend.clone()), fe_plugins)
.build()
.await
diff --git a/src/flow/src/adapter.rs b/src/flow/src/adapter.rs
index 33b05ddec12b..f440043f66fe 100644
--- a/src/flow/src/adapter.rs
+++ b/src/flow/src/adapter.rs
@@ -14,19 +14,657 @@
//! for getting data from source and sending results to sink
//! and communicating with other parts of the database
+#![warn(unused_imports)]
+
+use std::collections::{BTreeMap, HashMap};
+use std::sync::Arc;
+use std::time::Instant;
+
+use api::v1::{RowDeleteRequest, RowDeleteRequests, RowInsertRequest, RowInsertRequests};
+use catalog::CatalogManagerRef;
+use common_base::Plugins;
+use common_error::ext::BoxedError;
+use common_frontend::handler::FrontendInvoker;
+use common_meta::key::TableMetadataManagerRef;
+use common_runtime::JoinHandle;
+use common_telemetry::{debug, info};
+use datatypes::schema::ColumnSchema;
+use datatypes::value::Value;
+use greptime_proto::v1;
+use itertools::Itertools;
+use query::{QueryEngine, QueryEngineFactory};
+use serde::{Deserialize, Serialize};
+use session::context::QueryContext;
+use snafu::{OptionExt, ResultExt};
+use store_api::storage::{ConcreteDataType, RegionId};
+use table::metadata::TableId;
+use tokio::sync::{oneshot, watch, Mutex, RwLock};
+
+use crate::adapter::error::{ExternalSnafu, TableNotFoundSnafu, UnexpectedSnafu};
+pub(crate) use crate::adapter::node_context::FlownodeContext;
+use crate::adapter::parse_expr::parse_fixed;
+use crate::adapter::table_source::TableSource;
+use crate::adapter::util::column_schemas_to_proto;
+use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
+use crate::compute::ErrCollector;
+use crate::expr::GlobalId;
+use crate::repr::{self, DiffRow, Row};
+use crate::transform::sql_to_flow_plan;
pub(crate) mod error;
-pub(crate) mod node_context;
-mod table_source;
+mod flownode_impl;
+mod parse_expr;
+mod server;
+#[cfg(test)]
+mod tests;
mod util;
+mod worker;
-pub(crate) use node_context::FlownodeContext;
-pub(crate) use table_source::TableSource;
+pub(crate) mod node_context;
+mod table_source;
-mod worker;
+use error::Error;
pub const PER_REQ_MAX_ROW_CNT: usize = 8192;
+
// TODO: refactor common types for flow to a separate module
/// FlowId is a unique identifier for a flow task
-pub type FlowId = u32;
+pub type FlowId = u64;
pub type TableName = [String; 3];
+
+/// Options for flow node
+#[derive(Clone, Default, Debug, Serialize, Deserialize)]
+#[serde(default)]
+pub struct FlownodeOptions {
+ /// rpc address
+ pub rpc_addr: String,
+}
+
+/// Flownode Builder
+pub struct FlownodeBuilder {
+ flow_node_id: u32,
+ opts: FlownodeOptions,
+ plugins: Plugins,
+ table_meta: TableMetadataManagerRef,
+ catalog_manager: CatalogManagerRef,
+}
+
+impl FlownodeBuilder {
+ /// init flownode builder
+ pub fn new(
+ flow_node_id: u32,
+ opts: FlownodeOptions,
+ plugins: Plugins,
+ table_meta: TableMetadataManagerRef,
+ catalog_manager: CatalogManagerRef,
+ ) -> Self {
+ Self {
+ flow_node_id,
+ opts,
+ plugins,
+ table_meta,
+ catalog_manager,
+ }
+ }
+
+ /// TODO(discord9): error handling
+ pub async fn build(self) -> FlownodeManager {
+ let query_engine_factory = QueryEngineFactory::new_with_plugins(
+ // The query engine in flownode only translates plans with resolved table sources.
+ self.catalog_manager.clone(),
+ None,
+ None,
+ None,
+ false,
+ self.plugins.clone(),
+ );
+ let query_engine = query_engine_factory.query_engine();
+
+ let (tx, rx) = oneshot::channel();
+
+ let node_id = Some(self.flow_node_id);
+
+ let _handle = std::thread::spawn(move || {
+ let (flow_node_manager, mut worker) =
+ FlownodeManager::new_with_worker(node_id, query_engine, self.table_meta.clone());
+ let _ = tx.send(flow_node_manager);
+ info!("Flow Worker started in new thread");
+ worker.run();
+ });
+ let man = rx.await.unwrap();
+ info!("Flow Node Manager started");
+ man
+ }
+}
+
+/// Arc-ed FlownodeManager, cheaper to clone
+pub type FlownodeManagerRef = Arc<FlownodeManager>;
+
+/// FlownodeManager manages the state of all tasks in the flow node and should be run on a single thread.
+///
+/// For now, the timestamp choice is simply the current system timestamp.
+pub struct FlownodeManager {
+ /// The handle to the worker that will run the dataflow,
+ /// which is `!Send`, so a handle is used
+ pub worker_handles: Vec<Mutex<WorkerHandle>>,
+ /// The query engine that will be used to parse the query and convert it to a dataflow plan
+ query_engine: Arc<dyn QueryEngine>,
+ /// Getting table name and table schema from table info manager
+ table_info_source: TableSource,
+ frontend_invoker: RwLock<Option<Box<dyn FrontendInvoker + Send + Sync>>>,
+ /// contains mapping from table name to global id, and table schema
+ node_context: Mutex<FlownodeContext>,
+ flow_err_collectors: RwLock<BTreeMap<FlowId, ErrCollector>>,
+ src_send_buf_lens: RwLock<BTreeMap<TableId, watch::Receiver<usize>>>,
+ tick_manager: FlowTickManager,
+ node_id: Option<u32>,
+}
+
+/// Building FlownodeManager
+impl FlownodeManager {
+ /// set frontend invoker
+ pub async fn set_frontend_invoker(
+ self: &Arc<Self>,
+ frontend: Box<dyn FrontendInvoker + Send + Sync>,
+ ) {
+ *self.frontend_invoker.write().await = Some(frontend);
+ }
+
+ /// Create **without** setting `frontend_invoker`
+ pub fn new(
+ node_id: Option<u32>,
+ query_engine: Arc<dyn QueryEngine>,
+ table_meta: TableMetadataManagerRef,
+ ) -> Self {
+ let srv_map = TableSource::new(
+ table_meta.table_info_manager().clone(),
+ table_meta.table_name_manager().clone(),
+ );
+ let node_context = FlownodeContext::default();
+ let tick_manager = FlowTickManager::new();
+ let worker_handles = Vec::new();
+ FlownodeManager {
+ worker_handles,
+ query_engine,
+ table_info_source: srv_map,
+ frontend_invoker: RwLock::new(None),
+ node_context: Mutex::new(node_context),
+ flow_err_collectors: Default::default(),
+ src_send_buf_lens: Default::default(),
+ tick_manager,
+ node_id,
+ }
+ }
+
+ /// Create a flownode manager with one worker
+ pub fn new_with_worker<'s>(
+ node_id: Option<u32>,
+ query_engine: Arc<dyn QueryEngine>,
+ table_meta: TableMetadataManagerRef,
+ ) -> (Self, Worker<'s>) {
+ let mut zelf = Self::new(node_id, query_engine, table_meta);
+ let (handle, worker) = create_worker();
+ zelf.add_worker_handle(handle);
+ (zelf, worker)
+ }
+
+ /// Add a worker handle to the manager, meaning the corresponding worker is under its management
+ pub fn add_worker_handle(&mut self, handle: WorkerHandle) {
+ self.worker_handles.push(Mutex::new(handle));
+ }
+}
+
+#[derive(Debug)]
+pub enum DiffRequest {
+ Insert(Vec<(Row, repr::Timestamp)>),
+ Delete(Vec<(Row, repr::Timestamp)>),
+}
+
+/// Iterate through the diff rows and form continuous runs of rows with the same diff type
+pub fn diff_row_to_request(rows: Vec<DiffRow>) -> Vec<DiffRequest> {
+ let mut reqs = Vec::new();
+ for (row, ts, diff) in rows {
+ let last = reqs.last_mut();
+ match (last, diff) {
+ (Some(DiffRequest::Insert(rows)), 1) => {
+ rows.push((row, ts));
+ }
+ (Some(DiffRequest::Insert(_)), -1) => reqs.push(DiffRequest::Delete(vec![(row, ts)])),
+ (Some(DiffRequest::Delete(rows)), -1) => {
+ rows.push((row, ts));
+ }
+ (Some(DiffRequest::Delete(_)), 1) => reqs.push(DiffRequest::Insert(vec![(row, ts)])),
+ (None, 1) => reqs.push(DiffRequest::Insert(vec![(row, ts)])),
+ (None, -1) => reqs.push(DiffRequest::Delete(vec![(row, ts)])),
+ _ => {}
+ }
+ }
+ reqs
+}
+
+/// This impl block contains methods to send writeback requests to frontend
+impl FlownodeManager {
+ /// TODO(discord9): merge all diff rows of the same type into one request
+ ///
+ /// Return the number of requests it made
+ pub async fn send_writeback_requests(&self) -> Result<usize, Error> {
+ let all_reqs = self.generate_writeback_request().await;
+ if all_reqs.is_empty() || all_reqs.iter().all(|v| v.1.is_empty()) {
+ return Ok(0);
+ }
+ let mut req_cnt = 0;
+ for (table_name, reqs) in all_reqs {
+ if reqs.is_empty() {
+ continue;
+ }
+ let (catalog, schema) = (table_name[0].clone(), table_name[1].clone());
+ let ctx = Arc::new(QueryContext::with(&catalog, &schema));
+ // TODO(discord9): instead of auto-building the table from the request schema, actually build the table
+ // before `create flow` to be able to assign pk and ts etc.
+ let (primary_keys, schema) = if let Some(table_id) = self
+ .table_info_source
+ .get_table_id_from_name(&table_name)
+ .await?
+ {
+ let table_info = self
+ .table_info_source
+ .get_table_info_value(&table_id)
+ .await?
+ .unwrap();
+ let meta = table_info.table_info.meta;
+ let primary_keys = meta
+ .primary_key_indices
+ .into_iter()
+ .map(|i| meta.schema.column_schemas[i].name.clone())
+ .collect_vec();
+ let schema = meta.schema.column_schemas;
+ (primary_keys, schema)
+ } else {
+ // TODO(discord9): get ts column from `RelationType` once we are done rewriting flow plan to attach ts
+ let (primary_keys, schema) = {
+ let node_ctx = self.node_context.lock().await;
+ let gid: GlobalId = node_ctx
+ .table_repr
+ .get_by_name(&table_name)
+ .map(|x| x.1)
+ .unwrap();
+ let schema = node_ctx
+ .schema
+ .get(&gid)
+ .with_context(|| TableNotFoundSnafu {
+ name: format!("Table name = {:?}", table_name),
+ })?
+ .clone();
+ // TODO(discord9): use default key from schema
+ let primary_keys = schema
+ .keys
+ .first()
+ .map(|v| {
+ v.column_indices
+ .iter()
+ .map(|i| format!("Col_{i}"))
+ .collect_vec()
+ })
+ .unwrap_or_default();
+ let ts_col = ColumnSchema::new(
+ "ts",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ true,
+ )
+ .with_time_index(true);
+
+ let wout_ts = schema
+ .column_types
+ .into_iter()
+ .enumerate()
+ .map(|(idx, typ)| {
+ ColumnSchema::new(format!("Col_{idx}"), typ.scalar_type, typ.nullable)
+ })
+ .collect_vec();
+ let mut with_ts = wout_ts.clone();
+ with_ts.push(ts_col);
+ (primary_keys, with_ts)
+ };
+ (primary_keys, schema)
+ };
+
+ let proto_schema = column_schemas_to_proto(schema, &primary_keys)?;
+
+ debug!(
+ "Sending {} writeback requests to table {}, reqs={:?}",
+ reqs.len(),
+ table_name.join("."),
+ reqs
+ );
+
+ for req in reqs {
+ match req {
+ DiffRequest::Insert(insert) => {
+ let rows_proto: Vec<v1::Row> = insert
+ .into_iter()
+ .map(|(mut row, _ts)| {
+ row.extend(Some(Value::from(
+ common_time::Timestamp::new_millisecond(0),
+ )));
+ row.into()
+ })
+ .collect::<Vec<_>>();
+ let table_name = table_name.last().unwrap().clone();
+ let req = RowInsertRequest {
+ table_name,
+ rows: Some(v1::Rows {
+ schema: proto_schema.clone(),
+ rows: rows_proto,
+ }),
+ };
+ req_cnt += 1;
+ self.frontend_invoker
+ .read()
+ .await
+ .as_ref()
+ .with_context(|| UnexpectedSnafu {
+ reason: "Expect a frontend invoker for flownode to write back",
+ })?
+ .row_inserts(RowInsertRequests { inserts: vec![req] }, ctx.clone())
+ .await
+ .map_err(BoxedError::new)
+ .with_context(|_| ExternalSnafu {})?;
+ }
+ DiffRequest::Delete(remove) => {
+ info!("original remove rows={:?}", remove);
+ let rows_proto: Vec<v1::Row> = remove
+ .into_iter()
+ .map(|(mut row, _ts)| {
+ row.extend(Some(Value::from(
+ common_time::Timestamp::new_millisecond(0),
+ )));
+ row.into()
+ })
+ .collect::<Vec<_>>();
+ let table_name = table_name.last().unwrap().clone();
+ let req = RowDeleteRequest {
+ table_name,
+ rows: Some(v1::Rows {
+ schema: proto_schema.clone(),
+ rows: rows_proto,
+ }),
+ };
+
+ req_cnt += 1;
+ self.frontend_invoker
+ .read()
+ .await
+ .as_ref()
+ .with_context(|| UnexpectedSnafu {
+ reason: "Expect a frontend invoker for flownode to write back",
+ })?
+ .row_deletes(RowDeleteRequests { deletes: vec![req] }, ctx.clone())
+ .await
+ .map_err(BoxedError::new)
+ .with_context(|_| ExternalSnafu {})?;
+ }
+ }
+ }
+ }
+ Ok(req_cnt)
+ }
+
+ /// Generate writeback requests for all sink tables
+ pub async fn generate_writeback_request(&self) -> BTreeMap<TableName, Vec<DiffRequest>> {
+ let mut output = BTreeMap::new();
+ for (name, sink_recv) in self
+ .node_context
+ .lock()
+ .await
+ .sink_receiver
+ .iter_mut()
+ .map(|(n, (_s, r))| (n, r))
+ {
+ let mut rows = Vec::new();
+ while let Ok(row) = sink_recv.try_recv() {
+ rows.push(row);
+ }
+ let reqs = diff_row_to_request(rows);
+ output.insert(name.clone(), reqs);
+ }
+ output
+ }
+}
+
+/// Flow Runtime related methods
+impl FlownodeManager {
+ /// run in common_runtime background runtime
+ pub fn run_background(self: Arc<Self>) -> JoinHandle<()> {
+ info!("Starting flownode manager's background task");
+ common_runtime::spawn_bg(async move {
+ self.run().await;
+ })
+ }
+
+ /// log all flow errors
+ pub async fn log_all_errors(&self) {
+ for (f_id, f_err) in self.flow_err_collectors.read().await.iter() {
+ let all_errors = f_err.get_all().await;
+ if !all_errors.is_empty() {
+ let all_errors = all_errors
+ .into_iter()
+ .map(|i| format!("{:?}", i))
+ .join("\n");
+ common_telemetry::error!("Flow {} has following errors: {}", f_id, all_errors);
+ }
+ }
+ }
+
+ /// Trigger dataflow execution, and then send writeback requests to the source sender.
+ ///
+ /// Note that this method doesn't handle input mirror requests, as those should be handled by the grpc server.
+ pub async fn run(&self) {
+ debug!("Starting to run");
+ loop {
+ // TODO(discord9): only run when new inputs arrive or scheduled to
+ self.run_available().await.unwrap();
+ // TODO(discord9): error handling
+ self.send_writeback_requests().await.unwrap();
+ self.log_all_errors().await;
+ tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+ }
+ }
+
+ /// Run all available subgraphs in the flow node.
+ /// This will try to run all dataflows in this node.
+ ///
+ /// However, this is non-blocking and may return while the actual computation is still running in the worker thread.
+ /// TODO(discord9): add a flag for subgraphs that have received input since the last run
+ pub async fn run_available(&self) -> Result<(), Error> {
+ let now = self.tick_manager.tick();
+
+ loop {
+ for worker in self.worker_handles.iter() {
+ // TODO(discord9): consider how to handle error in individual worker
+ worker.lock().await.run_available(now).await.unwrap();
+ }
+ // first check how many inputs were sent
+ let send_cnt = match self.node_context.lock().await.flush_all_sender() {
+ Ok(cnt) => cnt,
+ Err(err) => {
+ common_telemetry::error!("Flush send buf errors: {:?}", err);
+ break;
+ }
+ };
+ // if no inputs
+ if send_cnt == 0 {
+ break;
+ } else {
+ debug!("FlownodeManager::run_available: send_cnt={}", send_cnt);
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Send the write request to the related source sender
+ pub async fn handle_write_request(
+ &self,
+ region_id: RegionId,
+ rows: Vec<DiffRow>,
+ ) -> Result<(), Error> {
+ debug!(
+ "Handling write request for region_id={:?} with {} rows",
+ region_id,
+ rows.len()
+ );
+ let table_id = region_id.table_id();
+ self.node_context.lock().await.send(table_id, rows)?;
+ Ok(())
+ }
+}
+
+/// Create&Remove flow
+impl FlownodeManager {
+ /// Remove a flow by its id
+ pub async fn remove_flow(&self, flow_id: FlowId) -> Result<(), Error> {
+ for handle in self.worker_handles.iter() {
+ let handle = handle.lock().await;
+ if handle.contains_flow(flow_id).await? {
+ handle.remove_flow(flow_id).await?;
+ break;
+ }
+ }
+ Ok(())
+ }
+
+ /// Return the task id if a new task is created, otherwise return None
+ ///
+ /// Steps to create a task:
+ /// 1. parse the query into a typed plan (and optionally parse the expire_when expr)
+ /// 2. render source/sink with the output table id and the used input table ids
+ #[allow(clippy::too_many_arguments)]
+ pub async fn create_flow(
+ &self,
+ flow_id: FlowId,
+ sink_table_name: TableName,
+ source_table_ids: &[TableId],
+ create_if_not_exist: bool,
+ expire_when: Option<String>,
+ comment: Option<String>,
+ sql: String,
+ flow_options: HashMap<String, String>,
+ query_ctx: Option<QueryContext>,
+ ) -> Result<Option<FlowId>, Error> {
+ if create_if_not_exist {
+ // check if the task already exists
+ for handle in self.worker_handles.iter() {
+ if handle.lock().await.contains_flow(flow_id).await? {
+ return Ok(None);
+ }
+ }
+ }
+
+ let mut node_ctx = self.node_context.lock().await;
+ // assign global id to source and sink table
+ for source in source_table_ids {
+ node_ctx
+ .assign_global_id_to_table(&self.table_info_source, None, Some(*source))
+ .await?;
+ }
+ node_ctx
+ .assign_global_id_to_table(&self.table_info_source, Some(sink_table_name.clone()), None)
+ .await?;
+
+ node_ctx.register_task_src_sink(flow_id, source_table_ids, sink_table_name.clone());
+
+ node_ctx.query_context = query_ctx.map(Arc::new);
+ // construct an active dataflow state with it
+ let flow_plan = sql_to_flow_plan(&mut node_ctx, &self.query_engine, &sql).await?;
+ debug!("Flow {:?}'s Plan is {:?}", flow_id, flow_plan);
+ node_ctx.assign_table_schema(&sink_table_name, flow_plan.typ.clone())?;
+
+ let expire_when = expire_when
+ .and_then(|s| {
+ if s.is_empty() || s.split_whitespace().join("").is_empty() {
+ None
+ } else {
+ Some(s)
+ }
+ })
+ .map(|d| {
+ let d = d.as_ref();
+ parse_fixed(d)
+ .map(|(_, n)| n)
+ .map_err(|err| err.to_string())
+ })
+ .transpose()
+ .map_err(|err| UnexpectedSnafu { reason: err }.build())?;
+ let _ = comment;
+ let _ = flow_options;
+
+ // TODO(discord9): add more than one handle
+ let sink_id = node_ctx.table_repr.get_by_name(&sink_table_name).unwrap().1;
+ let sink_sender = node_ctx.get_sink_by_global_id(&sink_id)?;
+
+ let source_ids = source_table_ids
+ .iter()
+ .map(|id| node_ctx.table_repr.get_by_table_id(id).unwrap().1)
+ .collect_vec();
+ let source_receivers = source_ids
+ .iter()
+ .map(|id| {
+ node_ctx
+ .get_source_by_global_id(id)
+ .map(|s| s.get_receiver())
+ })
+ .collect::<Result<Vec<_>, _>>()?;
+ let err_collector = ErrCollector::default();
+ self.flow_err_collectors
+ .write()
+ .await
+ .insert(flow_id, err_collector.clone());
+ let handle = &self.worker_handles[0].lock().await;
+ let create_request = worker::Request::Create {
+ flow_id,
+ plan: flow_plan,
+ sink_id,
+ sink_sender,
+ source_ids,
+ src_recvs: source_receivers,
+ expire_when,
+ create_if_not_exist,
+ err_collector,
+ };
+ handle.create_flow(create_request).await?;
+ info!("Successfully create flow with id={}", flow_id);
+ Ok(Some(flow_id))
+ }
+}
+
+/// FlowTickManager is a manager for the flow tick, which tracks flow execution progress
+///
+/// TODO(discord9): find a better way to do this, and avoid exposing the flow tick even to other flows to avoid
+/// TSO coordination mess
+#[derive(Clone)]
+pub struct FlowTickManager {
+ start: Instant,
+}
+
+impl std::fmt::Debug for FlowTickManager {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("FlowTickManager").finish()
+ }
+}
+
+impl FlowTickManager {
+ pub fn new() -> Self {
+ FlowTickManager {
+ start: Instant::now(),
+ }
+ }
+
+ /// Return the current timestamp in milliseconds
+ ///
+ /// TODO(discord9): reconsider since `tick()` requires a monotonic clock and also needs to survive recovery later
+ pub fn tick(&self) -> repr::Timestamp {
+ let current = Instant::now();
+ let since_the_epoch = current - self.start;
+ since_the_epoch.as_millis() as repr::Timestamp
+ }
+}
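The tick manager above derives a millisecond timestamp from a process-local `Instant`, so ticks are monotonic but not wall-clock aligned and reset on restart. A minimal standalone sketch of the same idea (the names here are illustrative, not the crate's API):

```rust
use std::time::Instant;

/// Hands out monotonically increasing millisecond ticks relative to process start.
struct TickClock {
    start: Instant,
}

impl TickClock {
    fn new() -> Self {
        Self { start: Instant::now() }
    }

    /// Milliseconds elapsed since the clock was created.
    fn tick(&self) -> i64 {
        self.start.elapsed().as_millis() as i64
    }
}

fn main() {
    let clock = TickClock::new();
    let t0 = clock.tick();
    std::thread::sleep(std::time::Duration::from_millis(5));
    assert!(clock.tick() >= t0); // ticks never go backwards
}
```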
diff --git a/src/flow/src/adapter/flownode_impl.rs b/src/flow/src/adapter/flownode_impl.rs
new file mode 100644
index 000000000000..057d8f932ed3
--- /dev/null
+++ b/src/flow/src/adapter/flownode_impl.rs
@@ -0,0 +1,117 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Implements the `Flownode` trait for `FlownodeManager` so standalone mode can call it
+
+use api::v1::flow::{flow_request, CreateRequest, DropRequest, FlowRequest, FlowResponse};
+use api::v1::region::InsertRequests;
+use common_error::ext::BoxedError;
+use common_meta::error::{ExternalSnafu, Result, UnexpectedSnafu};
+use common_meta::node_manager::Flownode;
+use itertools::Itertools;
+use snafu::ResultExt;
+
+use crate::adapter::FlownodeManager;
+use crate::repr::{self, DiffRow};
+
+fn to_meta_err(err: crate::adapter::error::Error) -> common_meta::error::Error {
+ // TODO(discord9): refactor this
+ Err::<(), _>(BoxedError::new(err))
+ .with_context(|_| ExternalSnafu)
+ .unwrap_err()
+}
+
+#[async_trait::async_trait]
+impl Flownode for FlownodeManager {
+ async fn handle(&self, request: FlowRequest) -> Result<FlowResponse> {
+ let query_ctx = request
+ .header
+ .and_then(|h| h.query_context)
+ .map(|ctx| ctx.into());
+ match request.body {
+ Some(flow_request::Body::Create(CreateRequest {
+ flow_id: Some(task_id),
+ source_table_ids,
+ sink_table_name: Some(sink_table_name),
+ create_if_not_exists,
+ expire_when,
+ comment,
+ sql,
+ flow_options,
+ })) => {
+ let source_table_ids = source_table_ids.into_iter().map(|id| id.id).collect_vec();
+ let sink_table_name = [
+ sink_table_name.catalog_name,
+ sink_table_name.schema_name,
+ sink_table_name.table_name,
+ ];
+ let ret = self
+ .create_flow(
+ task_id.id as u64,
+ sink_table_name,
+ &source_table_ids,
+ create_if_not_exists,
+ Some(expire_when),
+ Some(comment),
+ sql,
+ flow_options,
+ query_ctx,
+ )
+ .await
+ .map_err(to_meta_err)?;
+ Ok(FlowResponse {
+ affected_flows: ret
+ .map(|id| greptime_proto::v1::FlowId { id: id as u32 })
+ .into_iter()
+ .collect_vec(),
+ ..Default::default()
+ })
+ }
+ Some(flow_request::Body::Drop(DropRequest {
+ flow_id: Some(flow_id),
+ })) => {
+ self.remove_flow(flow_id.id as u64)
+ .await
+ .map_err(to_meta_err)?;
+ Ok(Default::default())
+ }
+ None => UnexpectedSnafu {
+ err_msg: "Missing request body",
+ }
+ .fail(),
+ _ => UnexpectedSnafu {
+ err_msg: "Invalid request body.",
+ }
+ .fail(),
+ }
+ }
+
+ async fn handle_inserts(&self, request: InsertRequests) -> Result<FlowResponse> {
+ for write_request in request.requests {
+ let region_id = write_request.region_id;
+ let rows_proto = write_request.rows.map(|r| r.rows).unwrap_or(vec![]);
+ // TODO(discord9): reconsider time assignment mechanism
+ let now = self.tick_manager.tick();
+ let rows: Vec<DiffRow> = rows_proto
+ .into_iter()
+ .map(repr::Row::from)
+ .map(|r| (r, now, 1))
+ .collect_vec();
+ self.handle_write_request(region_id.into(), rows)
+ .await
+ .map_err(to_meta_err)?;
+ }
+ Ok(Default::default())
+ }
+}
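`handle_inserts` stamps every incoming row with the manager's current tick and a diff of +1 (an insertion). A generic sketch of that shaping step, with the row type left abstract since `repr::Row` is internal to the crate:

```rust
/// (row, timestamp, diff): +1 means insert, -1 would mean retract.
type DiffRow<R> = (R, i64, i64);

fn stamp_rows<R>(rows: Vec<R>, now: i64) -> Vec<DiffRow<R>> {
    rows.into_iter().map(|r| (r, now, 1)).collect()
}

fn main() {
    let stamped = stamp_rows(vec!["a", "b"], 42);
    assert_eq!(stamped, vec![("a", 42, 1), ("b", 42, 1)]);
}
```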
diff --git a/src/flow/src/adapter/parse_expr.rs b/src/flow/src/adapter/parse_expr.rs
new file mode 100644
index 000000000000..3a28e813d597
--- /dev/null
+++ b/src/flow/src/adapter/parse_expr.rs
@@ -0,0 +1,245 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! parse expr like "ts <= now() - interval '5 m'"
+
+use nom::branch::alt;
+use nom::bytes::complete::{tag, tag_no_case};
+use nom::character::complete::{alphanumeric1, digit0, multispace0};
+use nom::combinator::peek;
+use nom::sequence::tuple;
+use nom::IResult;
+
+use crate::repr;
+
+#[test]
+fn test_parse_duration() {
+ let input = "1 h 5 m 42 second";
+ let (remain, ttl) = parse_duration(input).unwrap();
+ assert_eq!(remain, "");
+ assert_eq!(ttl, (3600 + 5 * 60 + 42) * 1000);
+}
+
+#[test]
+fn test_parse_fixed() {
+ let input = "timestamp < now() - INTERVAL '5m 42s'";
+ let (remain, ttl) = parse_fixed(input).unwrap();
+ assert_eq!(remain, "");
+ assert_eq!(ttl, (5 * 60 + 42) * 1000);
+}
+
+pub fn parse_fixed(input: &str) -> IResult<&str, i64> {
+ let (r, _) = tuple((
+ multispace0,
+ tag_no_case("timestamp"),
+ multispace0,
+ tag("<"),
+ multispace0,
+ tag_no_case("now()"),
+ multispace0,
+ tag("-"),
+ multispace0,
+ tag_no_case("interval"),
+ multispace0,
+ ))(input)?;
+ tuple((tag("'"), parse_duration, tag("'")))(r).map(|(r, (_, ttl, _))| (r, ttl))
+}
+
+/// Parse a duration and return the TTL in milliseconds; currently only supports the time part of the psql interval type
+pub fn parse_duration(input: &str) -> IResult<&str, i64> {
+ let mut intervals = vec![];
+ let mut remain = input;
+ while peek(parse_quality)(remain).is_ok() {
+ let (r, number) = parse_quality(remain)?;
+ let (r, unit) = parse_time_unit(r)?;
+ intervals.push((number, unit));
+ remain = r;
+ }
+ let mut total = 0;
+ for (number, unit) in intervals {
+ let number = match unit {
+ TimeUnit::Second => number,
+ TimeUnit::Minute => number * 60,
+ TimeUnit::Hour => number * 60 * 60,
+ };
+ total += number;
+ }
+ total *= 1000;
+ Ok((remain, total))
+}
+
+enum Expr {
+ Col(String),
+ Now,
+ Duration(repr::Duration),
+ Binary {
+ left: Box<Expr>,
+ op: String,
+ right: Box<Expr>,
+ },
+}
+
+fn parse_expr(input: &str) -> IResult<&str, Expr> {
+ parse_expr_bp(input, 0)
+}
+
+/// A simple Pratt parser
+fn parse_expr_bp(input: &str, min_bp: u8) -> IResult<&str, Expr> {
+ let (mut input, mut lhs): (&str, Expr) = parse_item(input)?;
+ loop {
+ let (r, op) = parse_op(input)?;
+ let (_, (l_bp, r_bp)) = infix_binding_power(op)?;
+ if l_bp < min_bp {
+ return Ok((input, lhs));
+ }
+ let (r, rhs) = parse_expr_bp(r, r_bp)?;
+ input = r;
+ lhs = Expr::Binary {
+ left: Box::new(lhs),
+ op: op.to_string(),
+ right: Box::new(rhs),
+ };
+ }
+}
+
+fn parse_op(input: &str) -> IResult<&str, &str> {
+ alt((parse_add_sub, parse_cmp))(input)
+}
+
+fn parse_item(input: &str) -> IResult<&str, Expr> {
+ if let Ok((r, name)) = parse_col_name(input) {
+ Ok((r, Expr::Col(name.to_string())))
+ } else if let Ok((r, _now)) = parse_now(input) {
+ Ok((r, Expr::Now))
+ } else if let Ok((_r, _num)) = parse_quality(input) {
+ todo!()
+ } else {
+ todo!()
+ }
+}
+
+fn infix_binding_power(op: &str) -> IResult<&str, (u8, u8)> {
+ let ret = match op {
+ "<" | ">" | "<=" | ">=" => (1, 2),
+ "+" | "-" => (3, 4),
+ _ => {
+ return Err(nom::Err::Error(nom::error::Error::new(
+ op,
+ nom::error::ErrorKind::Fail,
+ )))
+ }
+ };
+ Ok((op, ret))
+}
+
+fn parse_col_name(input: &str) -> IResult<&str, &str> {
+ tuple((multispace0, alphanumeric1, multispace0))(input).map(|(r, (_, name, _))| (r, name))
+}
+
+fn parse_now(input: &str) -> IResult<&str, &str> {
+ tag_no_case("now()")(input)
+}
+
+fn parse_add_sub(input: &str) -> IResult<&str, &str> {
+ tuple((multispace0, alt((tag("+"), tag("-"))), multispace0))(input)
+ .map(|(r, (_, op, _))| (r, op))
+}
+
+fn parse_cmp(input: &str) -> IResult<&str, &str> {
+ tuple((
+ multispace0,
+ alt((tag("<="), tag(">="), tag("<"), tag(">"))),
+ multispace0,
+ ))(input)
+ .map(|(r, (_, op, _))| (r, op))
+}
+
+/// Parse a number with an optional sign
+fn parse_quality(input: &str) -> IResult<&str, repr::Duration> {
+ tuple((
+ multispace0,
+ alt((tag("+"), tag("-"), tag(""))),
+ digit0,
+ multispace0,
+ ))(input)
+ .map(|(r, (_, sign, name, _))| (r, sign, name))
+ .and_then(|(r, sign, name)| {
+ let num = name.parse::<repr::Duration>().map_err(|_| {
+ nom::Err::Error(nom::error::Error::new(input, nom::error::ErrorKind::Digit))
+ })?;
+ let num = match sign {
+ "+" => num,
+ "-" => -num,
+ _ => num,
+ };
+ Ok((r, num))
+ })
+}
+
+#[derive(Debug, Clone)]
+enum TimeUnit {
+ Second,
+ Minute,
+ Hour,
+}
+
+#[derive(Debug, Clone)]
+enum DateUnit {
+ Day,
+ Month,
+ Year,
+}
+
+fn parse_time_unit(input: &str) -> IResult<&str, TimeUnit> {
+ fn to_second(input: &str) -> IResult<&str, TimeUnit> {
+ alt((
+ tag_no_case("second"),
+ tag_no_case("seconds"),
+ tag_no_case("S"),
+ ))(input)
+ .map(move |(r, _)| (r, TimeUnit::Second))
+ }
+ fn to_minute(input: &str) -> IResult<&str, TimeUnit> {
+ alt((
+ tag_no_case("minute"),
+ tag_no_case("minutes"),
+ tag_no_case("m"),
+ ))(input)
+ .map(move |(r, _)| (r, TimeUnit::Minute))
+ }
+ fn to_hour(input: &str) -> IResult<&str, TimeUnit> {
+ alt((tag_no_case("hour"), tag_no_case("hours"), tag_no_case("h")))(input)
+ .map(move |(r, _)| (r, TimeUnit::Hour))
+ }
+
+ tuple((
+ multispace0,
+ alt((
+ to_second, to_minute,
+ to_hour, /*
+ tag_no_case("day"),
+ tag_no_case("days"),
+ tag_no_case("d"),
+ tag_no_case("month"),
+ tag_no_case("months"),
+ tag_no_case("m"),
+ tag_no_case("year"),
+ tag_no_case("years"),
+ tag_no_case("y"),
+ */
+ )),
+ multispace0,
+ ))(input)
+ .map(|(r, (_, unit, _))| (r, unit))
+}
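The expire_when parser above is a small nom combinator chain; its unit tests show the expected shape of input and output. A reduced sketch in the same style (nom 7 assumed), recognizing `<digits>` followed by `s`, `m`, or `h` and returning milliseconds:

```rust
use nom::branch::alt;
use nom::bytes::complete::tag_no_case;
use nom::character::complete::{digit1, multispace0};
use nom::sequence::tuple;
use nom::IResult;

/// Parse e.g. "5 m" or "42s" into milliseconds.
fn parse_simple_duration(input: &str) -> IResult<&str, i64> {
    let (rest, (_, digits, _, unit)) = tuple((
        multispace0,
        digit1,
        multispace0,
        alt((tag_no_case("h"), tag_no_case("m"), tag_no_case("s"))),
    ))(input)?;
    let n: i64 = digits.parse().expect("digit1 yields ASCII digits");
    let secs = match unit.to_ascii_lowercase().as_str() {
        "h" => n * 3600,
        "m" => n * 60,
        _ => n,
    };
    Ok((rest, secs * 1000))
}

fn main() {
    assert_eq!(parse_simple_duration("5 m"), Ok(("", 300_000)));
    assert_eq!(parse_simple_duration("42s"), Ok(("", 42_000)));
}
```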
diff --git a/src/flow/src/adapter/server.rs b/src/flow/src/adapter/server.rs
new file mode 100644
index 000000000000..c0d0854572c7
--- /dev/null
+++ b/src/flow/src/adapter/server.rs
@@ -0,0 +1,147 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Implementation of the gRPC service for the flow node
+
+use std::net::SocketAddr;
+
+use common_meta::node_manager::Flownode;
+use common_telemetry::tracing::info;
+use futures::FutureExt;
+use greptime_proto::v1::flow::{flow_server, FlowRequest, FlowResponse, InsertRequests};
+use itertools::Itertools;
+use servers::error::{AlreadyStartedSnafu, StartGrpcSnafu, TcpBindSnafu, TcpIncomingSnafu};
+use snafu::{ensure, ResultExt};
+use tokio::net::TcpListener;
+use tokio::sync::{oneshot, Mutex};
+use tonic::transport::server::TcpIncoming;
+use tonic::{Request, Response, Status};
+
+use crate::adapter::FlownodeManagerRef;
+pub const FLOW_NODE_SERVER_NAME: &str = "FLOW_NODE_SERVER";
+
+/// Wraps the flow node manager to avoid the orphan rule with Arc<...>
+#[derive(Clone)]
+pub struct FlowService {
+ pub manager: FlownodeManagerRef,
+}
+
+#[async_trait::async_trait]
+impl flow_server::Flow for FlowService {
+ async fn handle_create_remove(
+ &self,
+ request: Request<FlowRequest>,
+ ) -> Result<Response<FlowResponse>, Status> {
+ let request = request.into_inner();
+ self.manager
+ .handle(request)
+ .await
+ .map(Response::new)
+ .map_err(|e| {
+ let msg = format!("failed to handle request: {:?}", e);
+ Status::internal(msg)
+ })
+ }
+
+ async fn handle_mirror_request(
+ &self,
+ request: Request<InsertRequests>,
+ ) -> Result<Response<FlowResponse>, Status> {
+ let request = request.into_inner();
+ // TODO(discord9): fix protobuf import order shenanigans to remove this duplicated define
+ let request = api::v1::region::InsertRequests {
+ requests: request
+ .requests
+ .into_iter()
+ .map(|insert| api::v1::region::InsertRequest {
+ region_id: insert.region_id,
+ rows: insert.rows,
+ })
+ .collect_vec(),
+ };
+ self.manager
+ .handle_inserts(request)
+ .await
+ .map(Response::new)
+ .map_err(|e| {
+ let msg = format!("failed to handle request: {:?}", e);
+ Status::internal(msg)
+ })
+ }
+}
+
+pub struct FlownodeServer {
+ pub shutdown_tx: Mutex<Option<oneshot::Sender<()>>>,
+ pub flow_service: FlowService,
+}
+
+impl FlownodeServer {
+ pub fn create_flow_service(&self) -> flow_server::FlowServer<impl flow_server::Flow> {
+ flow_server::FlowServer::new(self.flow_service.clone())
+ }
+}
+
+#[async_trait::async_trait]
+impl servers::server::Server for FlownodeServer {
+ async fn shutdown(&self) -> Result<(), servers::error::Error> {
+ let mut shutdown_tx = self.shutdown_tx.lock().await;
+ if let Some(tx) = shutdown_tx.take() {
+ if tx.send(()).is_err() {
+ info!("Receiver dropped, the flow node server has already shutdown");
+ }
+ }
+ info!("Shutdown flow node server");
+
+ Ok(())
+ }
+ async fn start(&self, addr: SocketAddr) -> Result<SocketAddr, servers::error::Error> {
+ let (tx, rx) = oneshot::channel::<()>();
+ let (incoming, addr) = {
+ let mut shutdown_tx = self.shutdown_tx.lock().await;
+ ensure!(
+ shutdown_tx.is_none(),
+ AlreadyStartedSnafu { server: "flow" }
+ );
+ let listener = TcpListener::bind(addr)
+ .await
+ .context(TcpBindSnafu { addr })?;
+ let addr = listener.local_addr().context(TcpBindSnafu { addr })?;
+ let incoming =
+ TcpIncoming::from_listener(listener, true, None).context(TcpIncomingSnafu)?;
+ info!("flow server is bound to {}", addr);
+
+ *shutdown_tx = Some(tx);
+
+ (incoming, addr)
+ };
+
+ let builder = tonic::transport::Server::builder().add_service(self.create_flow_service());
+ let _handle = common_runtime::spawn_bg(async move {
+ let _result = builder
+ .serve_with_incoming_shutdown(incoming, rx.map(drop))
+ .await
+ .context(StartGrpcSnafu);
+ });
+
+ // TODO(discord9): better place for dataflow to run per second
+ let manager_ref = self.flow_service.manager.clone();
+ let _handle = manager_ref.clone().run_background();
+
+ Ok(addr)
+ }
+
+ fn name(&self) -> &str {
+ FLOW_NODE_SERVER_NAME
+ }
+}
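The server above follows the usual oneshot-based shutdown pattern: `start` stashes the sender and hands the receiver to the serving future, while `shutdown` fires the sender. A minimal tokio-only sketch of that handshake, independent of tonic and the `servers` crate:

```rust
use tokio::sync::{oneshot, Mutex};

struct Server {
    shutdown_tx: Mutex<Option<oneshot::Sender<()>>>,
}

impl Server {
    async fn start(&self) -> oneshot::Receiver<()> {
        let (tx, rx) = oneshot::channel();
        *self.shutdown_tx.lock().await = Some(tx);
        rx // the serving loop awaits this and exits when it resolves
    }

    async fn shutdown(&self) {
        if let Some(tx) = self.shutdown_tx.lock().await.take() {
            // Err here just means the serving loop is already gone.
            let _ = tx.send(());
        }
    }
}

#[tokio::main]
async fn main() {
    let server = Server { shutdown_tx: Mutex::new(None) };
    let rx = server.start().await;
    server.shutdown().await;
    assert!(rx.await.is_ok()); // the signal was delivered
}
```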
diff --git a/src/flow/src/adapter/tests.rs b/src/flow/src/adapter/tests.rs
new file mode 100644
index 000000000000..4690ff54f021
--- /dev/null
+++ b/src/flow/src/adapter/tests.rs
@@ -0,0 +1,64 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Mock test for adapter module
+//! TODO(discord9): write mock test
+
+use datatypes::schema::{ColumnSchema, SchemaBuilder};
+use store_api::storage::ConcreteDataType;
+use table::metadata::{TableInfo, TableInfoBuilder, TableMetaBuilder};
+
+use super::*;
+
+pub fn new_test_table_info_with_name<I: IntoIterator<Item = u32>>(
+ table_id: TableId,
+ table_name: &str,
+ region_numbers: I,
+) -> TableInfo {
+ let column_schemas = vec![
+ ColumnSchema::new("number", ConcreteDataType::int32_datatype(), true),
+ ColumnSchema::new(
+ "ts",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ )
+ .with_time_index(true),
+ ];
+ let schema = SchemaBuilder::try_from(column_schemas)
+ .unwrap()
+ .version(123)
+ .build()
+ .unwrap();
+
+ let meta = TableMetaBuilder::default()
+ .schema(Arc::new(schema))
+ .primary_key_indices(vec![0])
+ .engine("engine")
+ .next_column_id(3)
+ .region_numbers(region_numbers.into_iter().collect::<Vec<_>>())
+ .build()
+ .unwrap();
+ TableInfoBuilder::default()
+ .table_id(table_id)
+ .table_version(5)
+ .name(table_name)
+ .meta(meta)
+ .build()
+ .unwrap()
+}
+
+/// Create a mock harness for flow node manager
+///
+/// containing several default table infos and schemas
+fn mock_harness_flow_node_manager() {}
diff --git a/src/flow/src/adapter/worker.rs b/src/flow/src/adapter/worker.rs
index 3e58a4307c79..1dc41db0487f 100644
--- a/src/flow/src/adapter/worker.rs
+++ b/src/flow/src/adapter/worker.rs
@@ -15,15 +15,16 @@
//! For single-thread flow worker
use std::collections::{BTreeMap, VecDeque};
-use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
+use common_telemetry::info;
use enum_as_inner::EnumAsInner;
use hydroflow::scheduled::graph::Hydroflow;
use snafu::{ensure, OptionExt};
use tokio::sync::{broadcast, mpsc, Mutex};
-use crate::adapter::error::{Error, FlowAlreadyExistSnafu, InternalSnafu};
+use crate::adapter::error::{Error, FlowAlreadyExistSnafu, InternalSnafu, UnexpectedSnafu};
use crate::adapter::FlowId;
use crate::compute::{Context, DataflowState, ErrCollector};
use crate::expr::GlobalId;
@@ -39,6 +40,7 @@ pub fn create_worker<'a>() -> (WorkerHandle, Worker<'a>) {
let (itc_client, itc_server) = create_inter_thread_call();
let worker_handle = WorkerHandle {
itc_client: Mutex::new(itc_client),
+ shutdown: AtomicBool::new(false),
};
let worker = Worker {
task_states: BTreeMap::new(),
@@ -105,6 +107,7 @@ impl<'subgraph> ActiveDataflowState<'subgraph> {
#[derive(Debug)]
pub struct WorkerHandle {
itc_client: Mutex<InterThreadCallClient>,
+ shutdown: AtomicBool,
}
impl WorkerHandle {
@@ -123,7 +126,7 @@ impl WorkerHandle {
.itc_client
.lock()
.await
- .call_blocking(create_reqs)
+ .call_with_resp(create_reqs)
.await?;
ret.into_create().map_err(|ret| {
InternalSnafu {
@@ -138,7 +141,7 @@ impl WorkerHandle {
/// remove task, return task id
pub async fn remove_flow(&self, flow_id: FlowId) -> Result<bool, Error> {
let req = Request::Remove { flow_id };
- let ret = self.itc_client.lock().await.call_blocking(req).await?;
+ let ret = self.itc_client.lock().await.call_with_resp(req).await?;
ret.into_remove().map_err(|ret| {
InternalSnafu {
@@ -157,13 +160,12 @@ impl WorkerHandle {
self.itc_client
.lock()
.await
- .call_non_blocking(Request::RunAvail { now })
- .await
+ .call_no_resp(Request::RunAvail { now })
}
pub async fn contains_flow(&self, flow_id: FlowId) -> Result<bool, Error> {
let req = Request::ContainTask { flow_id };
- let ret = self.itc_client.lock().await.call_blocking(req).await?;
+ let ret = self.itc_client.lock().await.call_with_resp(req).await?;
ret.into_contain_task().map_err(|ret| {
InternalSnafu {
@@ -177,11 +179,37 @@ impl WorkerHandle {
/// shutdown the worker
pub async fn shutdown(&self) -> Result<(), Error> {
- self.itc_client
- .lock()
- .await
- .call_non_blocking(Request::Shutdown)
- .await
+ if !self.shutdown.fetch_or(true, Ordering::SeqCst) {
+ self.itc_client.lock().await.call_no_resp(Request::Shutdown)
+ } else {
+ UnexpectedSnafu {
+ reason: "Worker already shutdown",
+ }
+ .fail()
+ }
+ }
+
+ /// shutdown the worker from a blocking (non-async) context
+ pub fn shutdown_blocking(&self) -> Result<(), Error> {
+ if !self.shutdown.fetch_or(true, Ordering::SeqCst) {
+ self.itc_client
+ .blocking_lock()
+ .call_no_resp(Request::Shutdown)
+ } else {
+ UnexpectedSnafu {
+ reason: "Worker already shutdown",
+ }
+ .fail()
+ }
+ }
+}
+
+impl Drop for WorkerHandle {
+ fn drop(&mut self) {
+ if let Err(err) = self.shutdown_blocking() {
+ common_telemetry::error!("Fail to shutdown worker: {:?}", err)
+ }
+ info!("Flow Worker shutdown due to Worker Handle dropped.")
}
}
@@ -395,7 +423,7 @@ struct InterThreadCallClient {
impl InterThreadCallClient {
/// call without expecting responses or blocking
- async fn call_non_blocking(&self, req: Request) -> Result<(), Error> {
+ fn call_no_resp(&self, req: Request) -> Result<(), Error> {
// TODO(discord9): relax memory order later
let call_id = self.call_id.fetch_add(1, Ordering::SeqCst);
self.arg_sender
@@ -404,7 +432,7 @@ impl InterThreadCallClient {
}
/// call blocking, and return the result
- async fn call_blocking(&mut self, req: Request) -> Result<Response, Error> {
+ async fn call_with_resp(&mut self, req: Request) -> Result<Response, Error> {
// TODO(discord9): relax memory order later
let call_id = self.call_id.fetch_add(1, Ordering::SeqCst);
self.arg_sender
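The new `shutdown`/`shutdown_blocking` pair relies on `fetch_or(true, ...)` returning the previous value, so only the first caller sees `false` and actually issues the shutdown request; later calls (including the one from `Drop`) hit the error branch. A small sketch of that at-most-once gate in isolation:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

struct ShutdownGate {
    done: AtomicBool,
}

impl ShutdownGate {
    fn new() -> Self {
        Self { done: AtomicBool::new(false) }
    }

    /// Returns true only for the first caller; every later call is a no-op.
    fn try_shutdown(&self) -> bool {
        !self.done.fetch_or(true, Ordering::SeqCst)
    }
}

fn main() {
    let gate = ShutdownGate::new();
    assert!(gate.try_shutdown());  // first call wins
    assert!(!gate.try_shutdown()); // already shut down
}
```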
diff --git a/src/flow/src/compute/render/src_sink.rs b/src/flow/src/compute/render/src_sink.rs
index 77f3e4105382..33ecb9670caa 100644
--- a/src/flow/src/compute/render/src_sink.rs
+++ b/src/flow/src/compute/render/src_sink.rs
@@ -36,6 +36,7 @@ impl<'referred, 'df> Context<'referred, 'df> {
&mut self,
mut src_recv: broadcast::Receiver<DiffRow>,
) -> Result<CollectionBundle, Error> {
+ debug!("Rendering Source");
let (send_port, recv_port) = self.df.make_edge::<_, Toff>("source");
let arrange_handler = self.compute_state.new_arrange(None);
let arrange_handler_inner =
@@ -60,7 +61,6 @@ impl<'referred, 'df> Context<'referred, 'df> {
let prev_avail = arr.into_iter().map(|((k, _), t, d)| (k, t, d));
let mut to_send = Vec::new();
let mut to_arrange = Vec::new();
-
// TODO(discord9): handling tokio broadcast error
while let Ok((r, t, d)) = src_recv.try_recv() {
if t <= now {
@@ -72,7 +72,7 @@ impl<'referred, 'df> Context<'referred, 'df> {
let all = prev_avail.chain(to_send).collect_vec();
if !all.is_empty() || !to_arrange.is_empty() {
debug!(
- "All send: {} rows, not yet send: {} rows",
+ "Rendered Source All send: {} rows, not yet send: {} rows",
all.len(),
to_arrange.len()
);
diff --git a/src/flow/src/expr/scalar.rs b/src/flow/src/expr/scalar.rs
index af51d0a53a2c..098de9c102e1 100644
--- a/src/flow/src/expr/scalar.rs
+++ b/src/flow/src/expr/scalar.rs
@@ -43,6 +43,7 @@ impl TypedExpr {
}
}
+/// TODO(discord9): add tumble function here
/// A scalar expression, which can be evaluated to a value.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum ScalarExpr {
diff --git a/src/flow/src/lib.rs b/src/flow/src/lib.rs
index dac53002cbc1..e0ebae2bd226 100644
--- a/src/flow/src/lib.rs
+++ b/src/flow/src/lib.rs
@@ -17,6 +17,7 @@
//! It also contains definition of expression, adapter and plan, and internal state management.
#![feature(let_chains)]
+#![feature(duration_abs_diff)]
#![allow(dead_code)]
#![allow(unused_imports)]
#![warn(missing_docs)]
@@ -30,3 +31,5 @@ mod plan;
mod repr;
mod transform;
mod utils;
+
+pub use adapter::{FlownodeBuilder, FlownodeManager, FlownodeManagerRef, FlownodeOptions};
diff --git a/src/frontend/src/instance/standalone.rs b/src/frontend/src/instance/standalone.rs
index 911c7fd30b11..5c9e7a46f65a 100644
--- a/src/frontend/src/instance/standalone.rs
+++ b/src/frontend/src/instance/standalone.rs
@@ -31,16 +31,19 @@ use snafu::{OptionExt, ResultExt};
use crate::error::{InvalidRegionRequestSnafu, InvokeRegionServerSnafu, Result};
-pub struct StandaloneDatanodeManager(pub RegionServer);
+pub struct StandaloneDatanodeManager {
+ pub region_server: RegionServer,
+ pub flow_server: FlownodeRef,
+}
#[async_trait]
impl NodeManager for StandaloneDatanodeManager {
async fn datanode(&self, _datanode: &Peer) -> DatanodeRef {
- RegionInvoker::arc(self.0.clone())
+ RegionInvoker::arc(self.region_server.clone())
}
async fn flownode(&self, _node: &Peer) -> FlownodeRef {
- unimplemented!()
+ self.flow_server.clone()
}
}
diff --git a/tests-integration/Cargo.toml b/tests-integration/Cargo.toml
index 01681c37b0c1..fe6e3e3b0600 100644
--- a/tests-integration/Cargo.toml
+++ b/tests-integration/Cargo.toml
@@ -39,6 +39,7 @@ common-wal.workspace = true
datanode = { workspace = true }
datatypes.workspace = true
dotenv.workspace = true
+flow.workspace = true
frontend = { workspace = true, features = ["testing"] }
futures.workspace = true
futures-util.workspace = true
diff --git a/tests-integration/src/standalone.rs b/tests-integration/src/standalone.rs
index 4d99c9744fab..5cbc46c69305 100644
--- a/tests-integration/src/standalone.rs
+++ b/tests-integration/src/standalone.rs
@@ -35,6 +35,7 @@ use common_procedure::options::ProcedureConfig;
use common_procedure::ProcedureManagerRef;
use common_wal::config::{DatanodeWalConfig, MetasrvWalConfig};
use datanode::datanode::DatanodeBuilder;
+use flow::FlownodeBuilder;
use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance, StandaloneDatanodeManager};
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
@@ -128,6 +129,7 @@ impl GreptimeDbStandaloneBuilder {
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
table_metadata_manager.init().await.unwrap();
+
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
let layered_cache_builder = LayeredCacheRegistryBuilder::default();
@@ -149,7 +151,19 @@ impl GreptimeDbStandaloneBuilder {
)
.await;
- let node_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));
+ let flow_builder = FlownodeBuilder::new(
+ 1, // for standalone mode this value is default to one
+ Default::default(),
+ plugins.clone(),
+ table_metadata_manager.clone(),
+ catalog_manager.clone(),
+ );
+ let flownode = Arc::new(flow_builder.build().await);
+
+ let node_manager = Arc::new(StandaloneDatanodeManager {
+ region_server: datanode.region_server(),
+ flow_server: flownode.clone(),
+ });
let table_id_sequence = Arc::new(
SequenceBuilder::new(TABLE_ID_SEQ, kv_backend.clone())
@@ -204,6 +218,11 @@ impl GreptimeDbStandaloneBuilder {
.await
.unwrap();
+ flownode
+ .set_frontend_invoker(Box::new(instance.clone()))
+ .await;
+ let _node_handle = flownode.run_background();
+
procedure_manager.start().await.unwrap();
wal_options_allocator.start().await.unwrap();
|
feat
|
flow node manager (#3954)
|
32a0023010fa33d386143a8f314c3a85dd42db91
|
2024-10-25 09:15:24
|
shuiyisong
|
chore: add schema urls to otlp logs (#4876)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index f447ea1148bb..d4db1185f331 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -200,12 +200,6 @@ version = "1.0.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6"
-[[package]]
-name = "anymap"
-version = "1.0.0-beta.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f1f8f5a6f3d50d89e3797d7593a50f96bb2aaa20ca0cc7be1fb673232c91d72"
-
[[package]]
name = "anymap2"
version = "0.13.0"
@@ -6424,16 +6418,6 @@ dependencies = [
"url",
]
-[[package]]
-name = "meter-core"
-version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd#80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd"
-dependencies = [
- "anymap",
- "once_cell",
- "parking_lot 0.12.3",
-]
-
[[package]]
name = "meter-core"
version = "0.1.0"
@@ -6447,9 +6431,9 @@ dependencies = [
[[package]]
name = "meter-macros"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd#80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd"
+source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=a10facb353b41460eeb98578868ebf19c2084fac#a10facb353b41460eeb98578868ebf19c2084fac"
dependencies = [
- "meter-core 0.1.0 (git+https://github.com/GreptimeTeam/greptime-meter.git?rev=80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd)",
+ "meter-core",
]
[[package]]
@@ -7614,7 +7598,7 @@ dependencies = [
"futures-util",
"lazy_static",
"meta-client",
- "meter-core 0.1.0 (git+https://github.com/GreptimeTeam/greptime-meter.git?rev=a10facb353b41460eeb98578868ebf19c2084fac)",
+ "meter-core",
"meter-macros",
"moka",
"object-store",
@@ -9018,7 +9002,7 @@ dependencies = [
"humantime",
"itertools 0.10.5",
"lazy_static",
- "meter-core 0.1.0 (git+https://github.com/GreptimeTeam/greptime-meter.git?rev=a10facb353b41460eeb98578868ebf19c2084fac)",
+ "meter-core",
"meter-macros",
"num",
"num-traits",
@@ -10903,7 +10887,7 @@ dependencies = [
"common-telemetry",
"common-time",
"derive_builder 0.12.0",
- "meter-core 0.1.0 (git+https://github.com/GreptimeTeam/greptime-meter.git?rev=a10facb353b41460eeb98578868ebf19c2084fac)",
+ "meter-core",
"snafu 0.8.5",
"sql",
]
diff --git a/Cargo.toml b/Cargo.toml
index 60d8215634fb..daf79b17f931 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -261,7 +261,7 @@ tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }
[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
-rev = "80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd"
+rev = "a10facb353b41460eeb98578868ebf19c2084fac"
[profile.release]
debug = 1
diff --git a/src/frontend/src/instance/otlp.rs b/src/frontend/src/instance/otlp.rs
index a2245d8fb84a..0c12658b37a5 100644
--- a/src/frontend/src/instance/otlp.rs
+++ b/src/frontend/src/instance/otlp.rs
@@ -113,6 +113,7 @@ impl OpenTelemetryProtocolHandler for Instance {
.plugins
.get::<OpenTelemetryProtocolInterceptorRef<servers::error::Error>>();
interceptor_ref.pre_execute(ctx.clone())?;
+
let (requests, rows) = otlp::logs::to_grpc_insert_requests(request, pipeline, table_name)?;
self.handle_log_inserts(requests, ctx)
.await
diff --git a/src/servers/src/http/otlp.rs b/src/servers/src/http/otlp.rs
index 51f2683e1b6b..6e5a583c0d62 100644
--- a/src/servers/src/http/otlp.rs
+++ b/src/servers/src/http/otlp.rs
@@ -41,7 +41,7 @@ use snafu::prelude::*;
use super::header::constants::GREPTIME_LOG_EXTRACT_KEYS_HEADER_NAME;
use super::header::{write_cost_header_map, CONTENT_TYPE_PROTOBUF};
-use crate::error::{self, Result};
+use crate::error::{self, PipelineSnafu, Result};
use crate::http::header::constants::{
GREPTIME_LOG_PIPELINE_NAME_HEADER_NAME, GREPTIME_LOG_PIPELINE_VERSION_HEADER_NAME,
GREPTIME_LOG_TABLE_NAME_HEADER_NAME,
@@ -227,15 +227,9 @@ pub async fn logs(
.start_timer();
let request = ExportLogsServiceRequest::decode(bytes).context(error::DecodeOtlpRequestSnafu)?;
- let pipeline_way;
- if let Some(pipeline_name) = &pipeline_info.pipeline_name {
+ let pipeline_way = if let Some(pipeline_name) = &pipeline_info.pipeline_name {
let pipeline_version =
- to_pipeline_version(pipeline_info.pipeline_version).map_err(|_| {
- error::InvalidParameterSnafu {
- reason: GREPTIME_LOG_PIPELINE_VERSION_HEADER_NAME,
- }
- .build()
- })?;
+ to_pipeline_version(pipeline_info.pipeline_version).context(PipelineSnafu)?;
let pipeline = match handler
.get_pipeline(pipeline_name, pipeline_version, query_ctx.clone())
.await
@@ -245,10 +239,10 @@ pub async fn logs(
return Err(e);
}
};
- pipeline_way = PipelineWay::Custom(pipeline);
+ PipelineWay::Custom(pipeline)
} else {
- pipeline_way = PipelineWay::OtlpLog(Box::new(select_info));
- }
+ PipelineWay::OtlpLog(Box::new(select_info))
+ };
handler
.logs(request, pipeline_way, table_info.table_name, query_ctx)
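The hunk above replaces a "declare, then assign in each branch" pattern with a single `let` bound to the whole `if`/`else` expression, which also lets the compiler guarantee initialization. A toy illustration of the same refactor (the types here are simplified stand-ins, not the real `PipelineWay`):

```rust
#[derive(Debug, PartialEq)]
enum PipelineWay {
    Custom(String),
    Default,
}

fn pick(pipeline_name: Option<&str>) -> PipelineWay {
    // After the refactor: the binding is the value of the if/else expression.
    let pipeline_way = if let Some(name) = pipeline_name {
        PipelineWay::Custom(name.to_string())
    } else {
        PipelineWay::Default
    };
    pipeline_way
}

fn main() {
    assert_eq!(pick(Some("nginx")), PipelineWay::Custom("nginx".into()));
    assert_eq!(pick(None), PipelineWay::Default);
}
```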
diff --git a/src/servers/src/otlp/logs.rs b/src/servers/src/otlp/logs.rs
index 5faaced461ef..8f31a1db064b 100644
--- a/src/servers/src/otlp/logs.rs
+++ b/src/servers/src/otlp/logs.rs
@@ -186,36 +186,51 @@ fn log_to_pipeline_value(
fn build_otlp_logs_identity_schema() -> Vec<ColumnSchema> {
[
(
- "scope_name",
+ "timestamp",
+ ColumnDataType::TimestampNanosecond,
+ SemanticType::Timestamp,
+ None,
+ None,
+ ),
+ (
+ "trace_id",
ColumnDataType::String,
- SemanticType::Tag,
+ SemanticType::Field,
None,
None,
),
(
- "scope_version",
+ "span_id",
ColumnDataType::String,
SemanticType::Field,
None,
None,
),
(
- "scope_attributes",
- ColumnDataType::Binary,
+ "severity_text",
+ ColumnDataType::String,
SemanticType::Field,
- Some(ColumnDataTypeExtension {
- type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
- }),
+ None,
None,
),
(
- "resource_attributes",
- ColumnDataType::Binary,
+ "severity_number",
+ ColumnDataType::Int32,
+ SemanticType::Field,
+ None,
+ None,
+ ),
+ (
+ "body",
+ ColumnDataType::String,
SemanticType::Field,
- Some(ColumnDataTypeExtension {
- type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
- }),
None,
+ Some(ColumnOptions {
+ options: StdHashMap::from([(
+ "fulltext".to_string(),
+ r#"{"enable":true}"#.to_string(),
+ )]),
+ }),
),
(
"log_attributes",
@@ -227,65 +242,57 @@ fn build_otlp_logs_identity_schema() -> Vec<ColumnSchema> {
None,
),
(
- "timestamp",
- ColumnDataType::TimestampNanosecond,
- SemanticType::Timestamp,
- None,
- None,
- ),
- (
- "observed_timestamp",
- ColumnDataType::TimestampNanosecond,
+ "trace_flags",
+ ColumnDataType::Uint32,
SemanticType::Field,
None,
None,
),
(
- "trace_id",
+ "scope_name",
ColumnDataType::String,
SemanticType::Tag,
None,
None,
),
(
- "span_id",
+ "scope_version",
ColumnDataType::String,
- SemanticType::Tag,
+ SemanticType::Field,
None,
None,
),
(
- "trace_flags",
- ColumnDataType::Uint32,
+ "scope_attributes",
+ ColumnDataType::Binary,
SemanticType::Field,
- None,
+ Some(ColumnDataTypeExtension {
+ type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
+ }),
None,
),
(
- "severity_text",
+ "scope_schema_url",
ColumnDataType::String,
SemanticType::Field,
None,
None,
),
(
- "severity_number",
- ColumnDataType::Int32,
+ "resource_attributes",
+ ColumnDataType::Binary,
SemanticType::Field,
- None,
+ Some(ColumnDataTypeExtension {
+ type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
+ }),
None,
),
(
- "body",
+ "resource_schema_url",
ColumnDataType::String,
SemanticType::Field,
None,
- Some(ColumnOptions {
- options: StdHashMap::from([(
- "fulltext".to_string(),
- r#"{"enable":true}"#.to_string(),
- )]),
- }),
+ None,
),
]
.into_iter()
@@ -301,60 +308,59 @@ fn build_otlp_logs_identity_schema() -> Vec<ColumnSchema> {
.collect::<Vec<ColumnSchema>>()
}
-fn build_otlp_build_in_row(
- log: LogRecord,
- resource_attr: JsonbValue<'static>,
- scope_name: Option<String>,
- scope_version: Option<String>,
- scope_attrs: JsonbValue<'static>,
-) -> Row {
+fn build_otlp_build_in_row(log: LogRecord, parse_ctx: &mut ParseContext) -> Row {
let log_attr = key_value_to_jsonb(log.attributes);
+ let ts = if log.time_unix_nano != 0 {
+ log.time_unix_nano
+ } else {
+ log.observed_time_unix_nano
+ };
+
let row = vec![
GreptimeValue {
- value_data: scope_name.map(ValueData::StringValue),
+ value_data: Some(ValueData::TimestampNanosecondValue(ts as i64)),
},
GreptimeValue {
- value_data: scope_version.map(ValueData::StringValue),
+ value_data: Some(ValueData::StringValue(bytes_to_hex_string(&log.trace_id))),
},
GreptimeValue {
- value_data: Some(ValueData::BinaryValue(scope_attrs.to_vec())),
+ value_data: Some(ValueData::StringValue(bytes_to_hex_string(&log.span_id))),
},
GreptimeValue {
- value_data: Some(ValueData::BinaryValue(resource_attr.to_vec())),
+ value_data: Some(ValueData::StringValue(log.severity_text)),
},
GreptimeValue {
- value_data: Some(ValueData::BinaryValue(log_attr.to_vec())),
+ value_data: Some(ValueData::I32Value(log.severity_number)),
},
GreptimeValue {
- value_data: Some(ValueData::TimestampNanosecondValue(
- log.time_unix_nano as i64,
- )),
+ value_data: log
+ .body
+ .as_ref()
+ .map(|x| ValueData::StringValue(log_body_to_string(x))),
},
GreptimeValue {
- value_data: Some(ValueData::TimestampNanosecondValue(
- log.observed_time_unix_nano as i64,
- )),
+ value_data: Some(ValueData::BinaryValue(log_attr.to_vec())),
},
GreptimeValue {
- value_data: Some(ValueData::StringValue(bytes_to_hex_string(&log.trace_id))),
+ value_data: Some(ValueData::U32Value(log.flags)),
},
GreptimeValue {
- value_data: Some(ValueData::StringValue(bytes_to_hex_string(&log.span_id))),
+ value_data: parse_ctx.scope_name.clone().map(ValueData::StringValue),
},
GreptimeValue {
- value_data: Some(ValueData::U32Value(log.flags)),
+ value_data: parse_ctx.scope_version.clone().map(ValueData::StringValue),
},
GreptimeValue {
- value_data: Some(ValueData::StringValue(log.severity_text)),
+ value_data: Some(ValueData::BinaryValue(parse_ctx.scope_attrs.to_vec())),
},
GreptimeValue {
- value_data: Some(ValueData::I32Value(log.severity_number)),
+ value_data: Some(ValueData::StringValue(parse_ctx.scope_url.clone())),
},
GreptimeValue {
- value_data: log
- .body
- .as_ref()
- .map(|x| ValueData::StringValue(log_body_to_string(x))),
+ value_data: Some(ValueData::BinaryValue(parse_ctx.resource_attr.to_vec())),
+ },
+ GreptimeValue {
+ value_data: Some(ValueData::StringValue(parse_ctx.resource_url.clone())),
},
];
Row { values: row }
@@ -363,7 +369,7 @@ fn build_otlp_build_in_row(
fn extract_field_from_attr_and_combine_schema(
schema_info: &mut SchemaInfo,
log_select: &SelectInfo,
- jsonb: &jsonb::Value<'static>,
+ jsonb: &jsonb::Value,
) -> Result<Vec<GreptimeValue>> {
if log_select.keys.is_empty() {
return Ok(Vec::new());
@@ -498,13 +504,12 @@ fn parse_export_logs_service_request_to_rows(
let mut extra_resource_schema = SchemaInfo::default();
let mut extra_scope_schema = SchemaInfo::default();
let mut extra_log_schema = SchemaInfo::default();
- let parse_infos = parse_resource(
- &select_info,
+ let mut parse_ctx = ParseContext::new(
&mut extra_resource_schema,
&mut extra_scope_schema,
&mut extra_log_schema,
- request.resource_logs,
- )?;
+ );
+ let parse_infos = parse_resource(&select_info, &mut parse_ctx, request.resource_logs)?;
// order of schema is important
// resource < scope < log
@@ -557,28 +562,27 @@ fn parse_export_logs_service_request_to_rows(
fn parse_resource(
select_info: &SelectInfo,
- extra_resource_schema: &mut SchemaInfo,
- extra_scope_schema: &mut SchemaInfo,
- extra_log_schema: &mut SchemaInfo,
+ parse_ctx: &mut ParseContext,
resource_logs_vec: Vec<ResourceLogs>,
) -> Result<Vec<ParseInfo>> {
let mut results = Vec::new();
+
for r in resource_logs_vec {
- let resource_attr = r
+ parse_ctx.resource_attr = r
.resource
.map(|resource| key_value_to_jsonb(resource.attributes))
.unwrap_or(JsonbValue::Null);
+ parse_ctx.resource_url = r.schema_url;
+
let resource_extracted_values = extract_field_from_attr_and_combine_schema(
- extra_resource_schema,
+ parse_ctx.extra_resource_schema,
select_info,
- &resource_attr,
+ &parse_ctx.resource_attr,
)?;
let rows = parse_scope(
- extra_scope_schema,
- extra_log_schema,
select_info,
r.scope_logs,
- resource_attr,
+ parse_ctx,
resource_extracted_values,
)?;
results.extend(rows);
@@ -586,38 +590,65 @@ fn parse_resource(
Ok(results)
}
-struct ScopeInfo {
+struct ParseContext<'a> {
+ // selector schema
+ extra_resource_schema: &'a mut SchemaInfo,
+ extra_scope_schema: &'a mut SchemaInfo,
+ extra_log_schema: &'a mut SchemaInfo,
+
+ // passdown values
+ resource_url: String,
+ resource_attr: JsonbValue<'a>,
scope_name: Option<String>,
scope_version: Option<String>,
- scope_attrs: JsonbValue<'static>,
+ scope_url: String,
+ scope_attrs: JsonbValue<'a>,
+}
+
+impl<'a> ParseContext<'a> {
+ pub fn new(
+ extra_resource_schema: &'a mut SchemaInfo,
+ extra_scope_schema: &'a mut SchemaInfo,
+ extra_log_schema: &'a mut SchemaInfo,
+ ) -> ParseContext<'a> {
+ ParseContext {
+ extra_resource_schema,
+ extra_scope_schema,
+ extra_log_schema,
+ resource_url: String::new(),
+ resource_attr: JsonbValue::Null,
+ scope_name: None,
+ scope_version: None,
+ scope_url: String::new(),
+ scope_attrs: JsonbValue::Null,
+ }
+ }
}
fn parse_scope(
- extra_scope_schema: &mut SchemaInfo,
- extra_log_schema: &mut SchemaInfo,
select_info: &SelectInfo,
scopes_log_vec: Vec<ScopeLogs>,
- resource_attr: JsonbValue<'static>,
+ parse_ctx: &mut ParseContext,
resource_extracted_values: Vec<GreptimeValue>,
) -> Result<Vec<ParseInfo>> {
let mut results = Vec::new();
for scope_logs in scopes_log_vec {
let (scope_attrs, scope_version, scope_name) = scope_to_jsonb(scope_logs.scope);
+ parse_ctx.scope_name = scope_name;
+ parse_ctx.scope_version = scope_version;
+ parse_ctx.scope_attrs = scope_attrs;
+ parse_ctx.scope_url = scope_logs.schema_url;
+
let scope_extracted_values = extract_field_from_attr_and_combine_schema(
- extra_scope_schema,
+ parse_ctx.extra_scope_schema,
select_info,
- &scope_attrs,
+ &parse_ctx.scope_attrs,
)?;
+
let rows = parse_log(
- extra_log_schema,
select_info,
scope_logs.log_records,
- &resource_attr,
- ScopeInfo {
- scope_name,
- scope_version,
- scope_attrs,
- },
+ parse_ctx,
&resource_extracted_values,
&scope_extracted_values,
)?;
@@ -627,15 +658,9 @@ fn parse_scope(
}
fn parse_log(
- extra_log_schema: &mut SchemaInfo,
select_info: &SelectInfo,
log_records: Vec<LogRecord>,
- resource_attr: &JsonbValue<'static>,
- ScopeInfo {
- scope_name,
- scope_version,
- scope_attrs,
- }: ScopeInfo,
+ parse_ctx: &mut ParseContext,
resource_extracted_values: &[GreptimeValue],
scope_extracted_values: &[GreptimeValue],
) -> Result<Vec<ParseInfo>> {
@@ -644,16 +669,13 @@ fn parse_log(
for log in log_records {
let log_attr = key_value_to_jsonb(log.attributes.clone());
- let row = build_otlp_build_in_row(
- log,
- resource_attr.clone(),
- scope_name.clone(),
- scope_version.clone(),
- scope_attrs.clone(),
- );
+ let row = build_otlp_build_in_row(log, parse_ctx);
- let log_extracted_values =
- extract_field_from_attr_and_combine_schema(extra_log_schema, select_info, &log_attr)?;
+ let log_extracted_values = extract_field_from_attr_and_combine_schema(
+ parse_ctx.extra_log_schema,
+ select_info,
+ &log_attr,
+ )?;
let parse_info = ParseInfo {
values: row,
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 131ba8363f61..c426af4f5f6a 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -1245,7 +1245,7 @@ pub async fn test_identify_pipeline(store_type: StorageType) {
);
let expected = r#"[["__source__","String","","YES","","FIELD"],["__time__","Int64","","YES","","FIELD"],["__topic__","String","","YES","","FIELD"],["ip","String","","YES","","FIELD"],["status","String","","YES","","FIELD"],["time","String","","YES","","FIELD"],["url","String","","YES","","FIELD"],["user-agent","String","","YES","","FIELD"],["dongdongdong","String","","YES","","FIELD"],["hasagei","String","","YES","","FIELD"],["greptime_timestamp","TimestampNanosecond","PRI","NO","","TIMESTAMP"]]"#;
- validate_data(&client, "desc logs", expected).await;
+ validate_data("identity_schema", &client, "desc logs", expected).await;
guard.remove_all().await;
}
@@ -1527,7 +1527,7 @@ pub async fn test_otlp_metrics(store_type: StorageType) {
// select metrics data
let expected = r#"[[1726053452870391000,9471.0]]"#;
- validate_data(&client, "select * from gen;", expected).await;
+ validate_data("otlp_metrics", &client, "select * from gen;", expected).await;
// drop table
let res = client.get("/v1/sql?sql=drop table gen;").send().await;
@@ -1538,7 +1538,13 @@ pub async fn test_otlp_metrics(store_type: StorageType) {
assert_eq!(StatusCode::OK, res.status());
// select metrics data again
- validate_data(&client, "select * from gen;", expected).await;
+ validate_data(
+ "otlp_metrics_with_gzip",
+ &client,
+ "select * from gen;",
+ expected,
+ )
+ .await;
guard.remove_all().await;
}
@@ -1564,7 +1570,13 @@ pub async fn test_otlp_traces(store_type: StorageType) {
// select traces data
let expected = r#"[["b5e5fb572cf0a3335dd194a14145fef5","3364d2da58c9fd2b","","{\"service.name\":\"telemetrygen\"}","telemetrygen","","{}","","lets-go","SPAN_KIND_CLIENT","STATUS_CODE_UNSET","","{\"net.peer.ip\":\"1.2.3.4\",\"peer.service\":\"telemetrygen-server\"}","[]","[]",1726631197820927000,1726631197821050000,0.123,1726631197820927000],["b5e5fb572cf0a3335dd194a14145fef5","74c82efa6f628e80","3364d2da58c9fd2b","{\"service.name\":\"telemetrygen\"}","telemetrygen","","{}","","okey-dokey-0","SPAN_KIND_SERVER","STATUS_CODE_UNSET","","{\"net.peer.ip\":\"1.2.3.4\",\"peer.service\":\"telemetrygen-client\"}","[]","[]",1726631197820927000,1726631197821050000,0.123,1726631197820927000]]"#;
- validate_data(&client, "select * from traces_preview_v01;", expected).await;
+ validate_data(
+ "otlp_traces",
+ &client,
+ "select * from traces_preview_v01;",
+ expected,
+ )
+ .await;
// drop table
let res = client
@@ -1573,12 +1585,18 @@ pub async fn test_otlp_traces(store_type: StorageType) {
.await;
assert_eq!(res.status(), StatusCode::OK);
- // write metrics data with gzip
+ // write traces data with gzip
let res = send_req(&client, vec![], "/v1/otlp/v1/traces", body.clone(), true).await;
assert_eq!(StatusCode::OK, res.status());
- // select metrics data again
- validate_data(&client, "select * from traces_preview_v01;", expected).await;
+ // select traces data again
+ validate_data(
+ "otlp_traces_with_gzip",
+ &client,
+ "select * from traces_preview_v01;",
+ expected,
+ )
+ .await;
guard.remove_all().await;
}
@@ -1594,8 +1612,27 @@ pub async fn test_otlp_logs(store_type: StorageType) {
let req: ExportLogsServiceRequest = serde_json::from_str(content).unwrap();
let body = req.encode_to_vec();
+
{
// write log data
+ let res = send_req(
+ &client,
+ vec![(
+ HeaderName::from_static("x-greptime-log-table-name"),
+ HeaderValue::from_static("logs1"),
+ )],
+ "/v1/otlp/v1/logs?db=public",
+ body.clone(),
+ false,
+ )
+ .await;
+ assert_eq!(StatusCode::OK, res.status());
+ let expected = r#"[[1581452773000000789,"30","30","Info",9,"something happened",{"customer":"acme","env":"dev"},1,"","",{},"https://opentelemetry.io/schemas/1.0.0/scopeLogs",{"resource-attr":"resource-attr-val-1"},"https://opentelemetry.io/schemas/1.0.0/resourceLogs"],[1581452773000009875,"3038303430323031303030303030303030303030303030303030303030303030","30313032303430383030303030303030","Info",9,"This is a log message",{"app":"server","instance_num":1},1,"","",{},"https://opentelemetry.io/schemas/1.0.0/scopeLogs",{"resource-attr":"resource-attr-val-1"},"https://opentelemetry.io/schemas/1.0.0/resourceLogs"]]"#;
+ validate_data("otlp_logs", &client, "select * from logs1;", expected).await;
+ }
+
+ {
+ // write log data with selector
let res = send_req(
&client,
vec![
@@ -1615,32 +1652,20 @@ pub async fn test_otlp_logs(store_type: StorageType) {
.await;
assert_eq!(StatusCode::OK, res.status());
- let expected = r#"[["","",{},{"resource-attr":"resource-attr-val-1"},{"customer":"acme","env":"dev"},1581452773000000789,1581452773000000789,"30","30",1,"Info",9,"something happened",null,null,"resource-attr-val-1"],["","",{},{"resource-attr":"resource-attr-val-1"},{"app":"server","instance_num":1},1581452773000009875,1581452773000009875,"3038303430323031303030303030303030303030303030303030303030303030","30313032303430383030303030303030",1,"Info",9,"This is a log message","server",1,"resource-attr-val-1"]]"#;
- validate_data(&client, "select * from logs;", expected).await;
- }
-
- {
- // write log data
- let res = send_req(
+ let expected = r#"[[1581452773000000789,"30","30","Info",9,"something happened",{"customer":"acme","env":"dev"},1,"","",{},"https://opentelemetry.io/schemas/1.0.0/scopeLogs",{"resource-attr":"resource-attr-val-1"},"https://opentelemetry.io/schemas/1.0.0/resourceLogs",null,null,"resource-attr-val-1"],[1581452773000009875,"3038303430323031303030303030303030303030303030303030303030303030","30313032303430383030303030303030","Info",9,"This is a log message",{"app":"server","instance_num":1},1,"","",{},"https://opentelemetry.io/schemas/1.0.0/scopeLogs",{"resource-attr":"resource-attr-val-1"},"https://opentelemetry.io/schemas/1.0.0/resourceLogs","server",1,"resource-attr-val-1"]]"#;
+ validate_data(
+ "otlp_logs_with_selector",
&client,
- vec![(
- HeaderName::from_static("x-greptime-log-table-name"),
- HeaderValue::from_static("logs1"),
- )],
- "/v1/otlp/v1/logs?db=public",
- body.clone(),
- false,
+ "select * from logs;",
+ expected,
)
.await;
- assert_eq!(StatusCode::OK, res.status());
- let expected = r#"[["","",{},{"resource-attr":"resource-attr-val-1"},{"customer":"acme","env":"dev"},1581452773000000789,1581452773000000789,"30","30",1,"Info",9,"something happened"],["","",{},{"resource-attr":"resource-attr-val-1"},{"app":"server","instance_num":1},1581452773000009875,1581452773000009875,"3038303430323031303030303030303030303030303030303030303030303030","30313032303430383030303030303030",1,"Info",9,"This is a log message"]]"#;
- validate_data(&client, "select * from logs1;", expected).await;
}
guard.remove_all().await;
}
-async fn validate_data(client: &TestClient, sql: &str, expected: &str) {
+async fn validate_data(test_name: &str, client: &TestClient, sql: &str, expected: &str) {
let res = client
.get(format!("/v1/sql?sql={sql}").as_str())
.send()
@@ -1649,7 +1674,7 @@ async fn validate_data(client: &TestClient, sql: &str, expected: &str) {
let resp = res.text().await;
let v = get_rows_from_output(&resp);
- assert_eq!(v, expected);
+ assert_eq!(v, expected, "validate {test_name} fail");
}
async fn send_req(
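The test-helper change threads a `test_name` through to the assertion so a failing case identifies itself in the panic message. The pattern in isolation:

```rust
fn validate(test_name: &str, actual: &str, expected: &str) {
    // The extra format arguments become the assertion's failure message.
    assert_eq!(actual, expected, "validate {test_name} fail");
}

fn main() {
    validate("otlp_metrics", "[[1,2]]", "[[1,2]]");
}
```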
|
chore
|
add schema urls to otlp logs (#4876)
|
7efcf868d519faf095243565658d41388f4694dc
|
2023-06-12 08:30:24
|
王听正
|
refactor: Remove MySQL related options from Datanode (#1756)
| false
|
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index 448f11ffe5c0..5e35cb21fbac 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -84,8 +84,6 @@ struct StartCommand {
rpc_addr: Option<String>,
#[clap(long)]
rpc_hostname: Option<String>,
- #[clap(long)]
- mysql_addr: Option<String>,
#[clap(long, multiple = true, value_delimiter = ',')]
metasrv_addr: Option<Vec<String>>,
#[clap(short, long)]
@@ -126,10 +124,6 @@ impl StartCommand {
opts.rpc_hostname = self.rpc_hostname.clone();
}
- if let Some(addr) = &self.mysql_addr {
- opts.mysql_addr = addr.clone();
- }
-
if let Some(node_id) = self.node_id {
opts.node_id = Some(node_id);
}
@@ -205,8 +199,6 @@ mod tests {
rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8
- mysql_addr = "127.0.0.1:4406"
- mysql_runtime_size = 2
[meta_client_options]
metasrv_addrs = ["127.0.0.1:3002"]
@@ -252,8 +244,6 @@ mod tests {
cmd.load_options(TopLevelOptions::default()).unwrap() else { unreachable!() };
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
- assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
- assert_eq!(2, options.mysql_runtime_size);
assert_eq!(Some(42), options.node_id);
assert_eq!("/other/wal", options.wal.dir.unwrap());
@@ -368,8 +358,6 @@ mod tests {
rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
rpc_runtime_size = 8
- mysql_addr = "127.0.0.1:4406"
- mysql_runtime_size = 2
[meta_client_options]
timeout_millis = 3000
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 3ccc1b6470d3..54d777d3f681 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -340,8 +340,6 @@ pub struct DatanodeOptions {
pub rpc_addr: String,
pub rpc_hostname: Option<String>,
pub rpc_runtime_size: usize,
- pub mysql_addr: String,
- pub mysql_runtime_size: usize,
pub http_opts: HttpOptions,
pub meta_client_options: Option<MetaClientOptions>,
pub wal: WalConfig,
@@ -359,8 +357,6 @@ impl Default for DatanodeOptions {
rpc_addr: "127.0.0.1:3001".to_string(),
rpc_hostname: None,
rpc_runtime_size: 8,
- mysql_addr: "127.0.0.1:4406".to_string(),
- mysql_runtime_size: 2,
http_opts: HttpOptions::default(),
meta_client_options: None,
wal: WalConfig::default(),
diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index 73ec466b8445..875e266da855 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -208,7 +208,6 @@ impl Env {
"start".to_string(),
];
args.push(format!("--rpc-addr=0.0.0.0:410{id}"));
- args.push(format!("--mysql-addr=0.0.0.0:420{id}"));
args.push(format!("--http-addr=0.0.0.0:430{id}"));
args.push(format!(
"--data-home=/tmp/greptimedb_datanode_{}",
|
refactor
|
Remove MySQL related options from Datanode (#1756)
|
0cf44e1e47076e6d5188d2756afe79653034d9d1
|
2024-12-26 08:36:25
|
localhost
|
chore: add more info for pipeline dryrun API (#5232)
| false
|
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index c1c331c33744..88a0ad21b623 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -159,6 +159,7 @@ pub enum Error {
#[snafu(display("Pipeline management api error"))]
Pipeline {
+ #[snafu(source)]
source: pipeline::error::Error,
#[snafu(implicit)]
location: Location,
diff --git a/src/servers/src/http/event.rs b/src/servers/src/http/event.rs
index c0926af833d6..14e8ad7dd5e4 100644
--- a/src/servers/src/http/event.rs
+++ b/src/servers/src/http/event.rs
@@ -30,6 +30,7 @@ use axum::http::{Request, StatusCode};
use axum::response::{IntoResponse, Response};
use axum::{async_trait, BoxError, Extension, Json, TypedHeader};
use bytes::Bytes;
+use common_error::ext::ErrorExt;
use common_query::prelude::GREPTIME_TIMESTAMP;
use common_query::{Output, OutputData};
use common_telemetry::{error, warn};
@@ -41,13 +42,13 @@ use pipeline::util::to_pipeline_version;
use pipeline::{GreptimeTransformer, PipelineVersion};
use prost::Message;
use serde::{Deserialize, Serialize};
-use serde_json::{Deserializer, Map, Value};
+use serde_json::{json, Deserializer, Map, Value};
use session::context::{Channel, QueryContext, QueryContextRef};
use snafu::{ensure, OptionExt, ResultExt};
use crate::error::{
- CatalogSnafu, DecodeOtlpRequestSnafu, Error, InvalidParameterSnafu, ParseJson5Snafu,
- ParseJsonSnafu, PipelineSnafu, Result, UnsupportedContentTypeSnafu,
+ status_code_to_http_status, CatalogSnafu, DecodeOtlpRequestSnafu, Error, InvalidParameterSnafu,
+ ParseJson5Snafu, ParseJsonSnafu, PipelineSnafu, Result, UnsupportedContentTypeSnafu,
};
use crate::http::extractor::LogTableName;
use crate::http::header::CONTENT_TYPE_PROTOBUF_STR;
@@ -404,6 +405,14 @@ fn check_data_valid(data_len: usize) -> Result<()> {
Ok(())
}
+fn add_step_info_for_pipeline_dryrun_error(step_msg: &str, e: Error) -> Response {
+ let body = Json(json!({
+ "error": format!("{}: {}", step_msg,e.output_msg()),
+ }));
+
+ (status_code_to_http_status(&e.status_code()), body).into_response()
+}
+
#[axum_macros::debug_handler]
pub async fn pipeline_dryrun(
State(log_state): State<LogState>,
@@ -431,8 +440,20 @@ pub async fn pipeline_dryrun(
dryrun_pipeline_inner(data, &pipeline)
}
Some(pipeline) => {
- let pipeline = handler.build_pipeline(&pipeline)?;
- dryrun_pipeline_inner(data, &pipeline)
+ let pipeline = handler.build_pipeline(&pipeline);
+ match pipeline {
+ Ok(pipeline) => match dryrun_pipeline_inner(data, &pipeline) {
+ Ok(response) => Ok(response),
+ Err(e) => Ok(add_step_info_for_pipeline_dryrun_error(
+ "Failed to exec pipeline",
+ e,
+ )),
+ },
+ Err(e) => Ok(add_step_info_for_pipeline_dryrun_error(
+ "Failed to build pipeline",
+ e,
+ )),
+ }
}
}
}
|
chore
|
add more info for pipeline dryrun API (#5232)
|
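The handler change above stops propagating pipeline errors with `?` and instead wraps them with a step label before turning them into an HTTP response. A hedged sketch of that conversion using axum's `Json`/`IntoResponse`; `PipelineError` is a stand-in type, and the status code is hard-coded here whereas the real code maps the error's own status code:

use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};
use axum::Json;
use serde_json::json;

#[derive(Debug)]
struct PipelineError {
    message: String,
}

fn error_with_step(step_msg: &str, e: PipelineError) -> Response {
    // The body carries both the step that failed and the underlying message,
    // so callers of the dryrun endpoint can tell build failures from exec failures.
    let body = Json(json!({
        "error": format!("{}: {}", step_msg, e.message),
    }));
    (StatusCode::BAD_REQUEST, body).into_response()
}

fn main() {
    let resp = error_with_step(
        "Failed to build pipeline",
        PipelineError {
            message: "unknown processor `foo`".to_string(),
        },
    );
    println!("status = {}", resp.status());
}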
693e8de83af8160b46dd20109ed23e552cba257a
|
2023-09-17 14:35:28
|
Ruihang Xia
|
feat: scope spawned task with trace id (#2419)
| false
|
diff --git a/src/common/meta/src/ddl/alter_table.rs b/src/common/meta/src/ddl/alter_table.rs
index 7cefa0f5668a..8641d5d7b2a6 100644
--- a/src/common/meta/src/ddl/alter_table.rs
+++ b/src/common/meta/src/ddl/alter_table.rs
@@ -17,7 +17,7 @@ use std::vec;
use api::v1::alter_expr::Kind;
use api::v1::region::{
alter_request, region_request, AddColumn, AddColumns, AlterRequest, DropColumn, DropColumns,
- RegionColumnDef, RegionRequest,
+ RegionColumnDef, RegionRequest, RegionRequestHeader,
};
use api::v1::{AlterExpr, RenameTable};
use async_trait::async_trait;
@@ -201,7 +201,10 @@ impl AlterTableProcedure {
let region_id = RegionId::new(table_id, region);
let request = self.create_alter_region_request(region_id)?;
let request = RegionRequest {
- header: None,
+ header: Some(RegionRequestHeader {
+ trace_id: common_telemetry::trace_id().unwrap_or_default(),
+ ..Default::default()
+ }),
body: Some(region_request::Body::Alter(request)),
};
debug!("Submitting {request:?} to {datanode}");
diff --git a/src/datanode/src/region_server.rs b/src/datanode/src/region_server.rs
index 040c4a595768..601ae1f28874 100644
--- a/src/datanode/src/region_server.rs
+++ b/src/datanode/src/region_server.rs
@@ -135,23 +135,19 @@ impl RegionServerHandler for RegionServer {
.context(ExecuteGrpcRequestSnafu)?;
let join_tasks = requests.into_iter().map(|(region_id, req)| {
let self_to_move = self.clone();
- self.inner
- .runtime
- .spawn(async move { self_to_move.handle_request(region_id, req).await })
+ async move { self_to_move.handle_request(region_id, req).await }
});
let results = try_join_all(join_tasks)
.await
- .context(servers_error::JoinTaskSnafu)?;
+ .map_err(BoxedError::new)
+ .context(ExecuteGrpcRequestSnafu)?;
// merge results by simply sum up affected rows.
// only insert/delete will have multiple results.
let mut affected_rows = 0;
for result in results {
- match result
- .map_err(BoxedError::new)
- .context(ExecuteGrpcRequestSnafu)?
- {
+ match result {
Output::AffectedRows(rows) => affected_rows += rows,
Output::Stream(_) | Output::RecordBatches(_) => {
// TODO: change the output type to only contains `affected_rows`
@@ -181,10 +177,15 @@ impl FlightCraft for RegionServer {
let ticket = request.into_inner().ticket;
let request = QueryRequest::decode(ticket.as_ref())
.context(servers_error::InvalidFlightTicketSnafu)?;
+ let trace_id = request
+ .header
+ .as_ref()
+ .map(|h| h.trace_id)
+ .unwrap_or_default();
let result = self.handle_read(request).await?;
- let stream = Box::pin(FlightRecordBatchStream::new(result));
+ let stream = Box::pin(FlightRecordBatchStream::new(result, trace_id));
Ok(Response::new(stream))
}
}
diff --git a/src/servers/src/grpc/flight.rs b/src/servers/src/grpc/flight.rs
index 6f4f7cbca721..cea21a8a00c6 100644
--- a/src/servers/src/grpc/flight.rs
+++ b/src/servers/src/grpc/flight.rs
@@ -150,23 +150,28 @@ impl FlightCraft for GreptimeRequestHandler {
let ticket = request.into_inner().ticket;
let request =
GreptimeRequest::decode(ticket.as_ref()).context(error::InvalidFlightTicketSnafu)?;
+ let trace_id = request
+ .header
+ .as_ref()
+ .map(|h| h.trace_id)
+ .unwrap_or_default();
let output = self.handle_request(request).await?;
let stream: Pin<Box<dyn Stream<Item = Result<FlightData, Status>> + Send + Sync>> =
- to_flight_data_stream(output);
+ to_flight_data_stream(output, trace_id);
Ok(Response::new(stream))
}
}
-fn to_flight_data_stream(output: Output) -> TonicStream<FlightData> {
+fn to_flight_data_stream(output: Output, trace_id: u64) -> TonicStream<FlightData> {
match output {
Output::Stream(stream) => {
- let stream = FlightRecordBatchStream::new(stream);
+ let stream = FlightRecordBatchStream::new(stream, trace_id);
Box::pin(stream) as _
}
Output::RecordBatches(x) => {
- let stream = FlightRecordBatchStream::new(x.as_stream());
+ let stream = FlightRecordBatchStream::new(x.as_stream(), trace_id);
Box::pin(stream) as _
}
Output::AffectedRows(rows) => {
diff --git a/src/servers/src/grpc/flight/stream.rs b/src/servers/src/grpc/flight/stream.rs
index 5ff570608e33..542b031df887 100644
--- a/src/servers/src/grpc/flight/stream.rs
+++ b/src/servers/src/grpc/flight/stream.rs
@@ -18,7 +18,7 @@ use std::task::{Context, Poll};
use arrow_flight::FlightData;
use common_grpc::flight::{FlightEncoder, FlightMessage};
use common_recordbatch::SendableRecordBatchStream;
-use common_telemetry::warn;
+use common_telemetry::{warn, TRACE_ID};
use futures::channel::mpsc;
use futures::channel::mpsc::Sender;
use futures::{SinkExt, Stream, StreamExt};
@@ -39,12 +39,11 @@ pub struct FlightRecordBatchStream {
}
impl FlightRecordBatchStream {
- pub fn new(recordbatches: SendableRecordBatchStream) -> Self {
+ pub fn new(recordbatches: SendableRecordBatchStream, trace_id: u64) -> Self {
let (tx, rx) = mpsc::channel::<TonicResult<FlightMessage>>(1);
- let join_handle =
- common_runtime::spawn_read(
- async move { Self::flight_data_stream(recordbatches, tx).await },
- );
+ let join_handle = common_runtime::spawn_read(TRACE_ID.scope(trace_id, async move {
+ Self::flight_data_stream(recordbatches, tx).await
+ }));
Self {
rx,
join_handle,
@@ -146,7 +145,7 @@ mod test {
let recordbatches = RecordBatches::try_new(schema.clone(), vec![recordbatch.clone()])
.unwrap()
.as_stream();
- let mut stream = FlightRecordBatchStream::new(recordbatches);
+ let mut stream = FlightRecordBatchStream::new(recordbatches, 0);
let mut raw_data = Vec::with_capacity(2);
raw_data.push(stream.next().await.unwrap().unwrap());
diff --git a/src/servers/src/grpc/greptime_handler.rs b/src/servers/src/grpc/greptime_handler.rs
index a1af9e8d6547..f8dbe3010163 100644
--- a/src/servers/src/grpc/greptime_handler.rs
+++ b/src/servers/src/grpc/greptime_handler.rs
@@ -27,7 +27,7 @@ use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_query::Output;
use common_runtime::Runtime;
-use common_telemetry::logging;
+use common_telemetry::{logging, TRACE_ID};
use metrics::{histogram, increment_counter};
use session::context::{QueryContextBuilder, QueryContextRef};
use snafu::{OptionExt, ResultExt};
@@ -74,6 +74,7 @@ impl GreptimeRequestHandler {
let request_type = request_type(&query).to_string();
let db = query_ctx.get_db_string();
let timer = RequestTimer::new(db.clone(), request_type);
+ let trace_id = query_ctx.trace_id();
// Executes requests in another runtime to
// 1. prevent the execution from being cancelled unexpected by Tonic runtime;
@@ -82,7 +83,7 @@ impl GreptimeRequestHandler {
// - Obtaining a `JoinHandle` to get the panic message (if there's any).
// From its docs, `JoinHandle` is cancel safe. The task keeps running even it's handle been dropped.
// 2. avoid the handler blocks the gRPC runtime incidentally.
- let handle = self.runtime.spawn(async move {
+ let handle = self.runtime.spawn(TRACE_ID.scope(trace_id, async move {
handler.do_query(query, query_ctx).await.map_err(|e| {
if e.status_code().should_log_error() {
logging::error!(e; "Failed to handle request");
@@ -92,7 +93,7 @@ impl GreptimeRequestHandler {
}
e
})
- });
+ }));
handle.await.context(JoinTaskSnafu).map_err(|e| {
timer.record(e.status_code());
diff --git a/src/servers/src/grpc/region_server.rs b/src/servers/src/grpc/region_server.rs
index a93686b026aa..19d075c356f0 100644
--- a/src/servers/src/grpc/region_server.rs
+++ b/src/servers/src/grpc/region_server.rs
@@ -19,7 +19,7 @@ use api::v1::region::{region_request, RegionRequest, RegionResponse};
use async_trait::async_trait;
use common_error::ext::ErrorExt;
use common_runtime::Runtime;
-use common_telemetry::{debug, error};
+use common_telemetry::{debug, error, TRACE_ID};
use snafu::{OptionExt, ResultExt};
use tonic::{Request, Response};
@@ -45,8 +45,14 @@ impl RegionServerRequestHandler {
}
async fn handle(&self, request: RegionRequest) -> Result<RegionResponse> {
+ let trace_id = request
+ .header
+ .context(InvalidQuerySnafu {
+ reason: "Expecting non-empty region request header.",
+ })?
+ .trace_id;
let query = request.body.context(InvalidQuerySnafu {
- reason: "Expecting non-empty GreptimeRequest.",
+ reason: "Expecting non-empty region request body.",
})?;
let handler = self.handler.clone();
@@ -58,7 +64,7 @@ impl RegionServerRequestHandler {
// - Obtaining a `JoinHandle` to get the panic message (if there's any).
// From its docs, `JoinHandle` is cancel safe. The task keeps running even it's handle been dropped.
// 2. avoid the handler blocks the gRPC runtime incidentally.
- let handle = self.runtime.spawn(async move {
+ let handle = self.runtime.spawn(TRACE_ID.scope(trace_id, async move {
handler.handle(query).await.map_err(|e| {
if e.status_code().should_log_error() {
error!(e; "Failed to handle request");
@@ -68,7 +74,7 @@ impl RegionServerRequestHandler {
}
e
})
- });
+ }));
handle.await.context(JoinTaskSnafu)?
}
|
feat
|
scope spawned task with trace id (#2419)
|
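The commit threads a trace id through spawned work by scoping a task-local value around each future. The sketch below shows the underlying tokio `task_local!` mechanism with plain `tokio::spawn`; `TRACE_ID` here is a local stand-in for `common_telemetry::TRACE_ID`, and the project spawns on its own runtime rather than the default one. Requires tokio with the `macros` and `rt-multi-thread` features:

use tokio::task_local;

task_local! {
    static TRACE_ID: u64;
}

async fn handle_request() {
    // Any code awaited inside the scoped future observes the same value,
    // so logging can attach the trace id without passing it explicitly.
    let id = TRACE_ID.try_with(|id| *id).unwrap_or_default();
    println!("handling request, trace_id = {id}");
}

#[tokio::main]
async fn main() {
    let trace_id = 42u64;
    // Mirrors `runtime.spawn(TRACE_ID.scope(trace_id, async move { ... }))` above:
    // the task-local is set only for the duration of the spawned future.
    let handle = tokio::spawn(TRACE_ID.scope(trace_id, async move {
        handle_request().await;
    }));
    handle.await.expect("task panicked");
}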
5be81abba358c47fc8e90cf8b2227e5553bafe4d
|
2025-02-10 14:44:54
|
Zhenchi
|
feat: add metadata method to puffin reader (#5501)
| false
|
diff --git a/src/puffin/src/puffin_manager.rs b/src/puffin/src/puffin_manager.rs
index 204bc2c66e2e..5217a3e6ccb1 100644
--- a/src/puffin/src/puffin_manager.rs
+++ b/src/puffin/src/puffin_manager.rs
@@ -21,6 +21,7 @@ pub mod stager;
mod tests;
use std::path::PathBuf;
+use std::sync::Arc;
use async_trait::async_trait;
use common_base::range_read::RangeReader;
@@ -28,6 +29,7 @@ use futures::AsyncRead;
use crate::blob_metadata::CompressionCodec;
use crate::error::Result;
+use crate::file_metadata::FileMetadata;
/// The `PuffinManager` trait provides a unified interface for creating `PuffinReader` and `PuffinWriter`.
#[async_trait]
@@ -79,6 +81,9 @@ pub trait PuffinReader {
fn with_file_size_hint(self, file_size_hint: Option<u64>) -> Self;
+ /// Returns the metadata of the Puffin file.
+ async fn metadata(&self) -> Result<Arc<FileMetadata>>;
+
/// Reads a blob from the Puffin file.
///
/// The returned `BlobGuard` is used to access the blob data.
diff --git a/src/puffin/src/puffin_manager/fs_puffin_manager/reader.rs b/src/puffin/src/puffin_manager/fs_puffin_manager/reader.rs
index 9228c0b59424..1202be3e0861 100644
--- a/src/puffin/src/puffin_manager/fs_puffin_manager/reader.rs
+++ b/src/puffin/src/puffin_manager/fs_puffin_manager/reader.rs
@@ -87,6 +87,15 @@ where
self
}
+ async fn metadata(&self) -> Result<Arc<FileMetadata>> {
+ let reader = self
+ .puffin_file_accessor
+ .reader(&self.puffin_file_name)
+ .await?;
+ let mut file = PuffinFileReader::new(reader);
+ self.get_puffin_file_metadata(&mut file).await
+ }
+
async fn blob(&self, key: &str) -> Result<Self::Blob> {
let mut reader = self
.puffin_file_accessor
|
feat
|
add metadata method to puffin reader (#5501)
|
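The new `metadata()` trait method returns shared file metadata from an async reader. A small sketch of that shape, assuming the `async-trait` and `tokio` crates; `FileMetadata`, `MetadataReader`, and `MockReader` are illustrative, not the puffin crate's actual types:

use std::sync::Arc;

use async_trait::async_trait;

#[derive(Debug, Default)]
struct FileMetadata {
    blob_count: usize,
}

#[async_trait]
trait MetadataReader {
    /// Returns the metadata of the underlying file.
    async fn metadata(&self) -> Result<Arc<FileMetadata>, String>;
}

struct MockReader {
    metadata: Arc<FileMetadata>,
}

#[async_trait]
impl MetadataReader for MockReader {
    async fn metadata(&self) -> Result<Arc<FileMetadata>, String> {
        // A real reader would open the file and parse its footer here;
        // the mock just hands back a shared copy of already-parsed metadata.
        Ok(self.metadata.clone())
    }
}

#[tokio::main]
async fn main() {
    let reader = MockReader {
        metadata: Arc::new(FileMetadata { blob_count: 2 }),
    };
    let meta = reader.metadata().await.unwrap();
    println!("blobs in file: {}", meta.blob_count);
}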
a3e47955b82ae1a77d6173640d577b3f62488aa9
|
2023-04-07 14:20:14
|
LFC
|
feat: information schema (#1327)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 0e131015eb97..4d0667b95f4d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1136,6 +1136,7 @@ version = "0.1.1"
dependencies = [
"api",
"arc-swap",
+ "arrow-schema",
"async-stream",
"async-trait",
"backoff",
@@ -2209,8 +2210,8 @@ dependencies = [
[[package]]
name = "datafusion"
-version = "21.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8e125d2ecf242b4f4b81f06839900dbb2037cc2a#8e125d2ecf242b4f4b81f06839900dbb2037cc2a"
+version = "21.1.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=21bf4ffccadfeea824ab6e29c0b872930d0e190a#21bf4ffccadfeea824ab6e29c0b872930d0e190a"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -2256,8 +2257,8 @@ dependencies = [
[[package]]
name = "datafusion-common"
-version = "21.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8e125d2ecf242b4f4b81f06839900dbb2037cc2a#8e125d2ecf242b4f4b81f06839900dbb2037cc2a"
+version = "21.1.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=21bf4ffccadfeea824ab6e29c0b872930d0e190a#21bf4ffccadfeea824ab6e29c0b872930d0e190a"
dependencies = [
"arrow",
"arrow-array",
@@ -2270,8 +2271,8 @@ dependencies = [
[[package]]
name = "datafusion-execution"
-version = "21.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8e125d2ecf242b4f4b81f06839900dbb2037cc2a#8e125d2ecf242b4f4b81f06839900dbb2037cc2a"
+version = "21.1.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=21bf4ffccadfeea824ab6e29c0b872930d0e190a#21bf4ffccadfeea824ab6e29c0b872930d0e190a"
dependencies = [
"dashmap",
"datafusion-common",
@@ -2287,8 +2288,8 @@ dependencies = [
[[package]]
name = "datafusion-expr"
-version = "21.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8e125d2ecf242b4f4b81f06839900dbb2037cc2a#8e125d2ecf242b4f4b81f06839900dbb2037cc2a"
+version = "21.1.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=21bf4ffccadfeea824ab6e29c0b872930d0e190a#21bf4ffccadfeea824ab6e29c0b872930d0e190a"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -2298,8 +2299,8 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
-version = "21.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8e125d2ecf242b4f4b81f06839900dbb2037cc2a#8e125d2ecf242b4f4b81f06839900dbb2037cc2a"
+version = "21.1.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=21bf4ffccadfeea824ab6e29c0b872930d0e190a#21bf4ffccadfeea824ab6e29c0b872930d0e190a"
dependencies = [
"arrow",
"async-trait",
@@ -2315,11 +2316,12 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
-version = "21.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8e125d2ecf242b4f4b81f06839900dbb2037cc2a#8e125d2ecf242b4f4b81f06839900dbb2037cc2a"
+version = "21.1.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=21bf4ffccadfeea824ab6e29c0b872930d0e190a#21bf4ffccadfeea824ab6e29c0b872930d0e190a"
dependencies = [
"ahash 0.8.3",
"arrow",
+ "arrow-array",
"arrow-buffer",
"arrow-schema",
"blake2",
@@ -2345,8 +2347,8 @@ dependencies = [
[[package]]
name = "datafusion-row"
-version = "21.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8e125d2ecf242b4f4b81f06839900dbb2037cc2a#8e125d2ecf242b4f4b81f06839900dbb2037cc2a"
+version = "21.1.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=21bf4ffccadfeea824ab6e29c0b872930d0e190a#21bf4ffccadfeea824ab6e29c0b872930d0e190a"
dependencies = [
"arrow",
"datafusion-common",
@@ -2356,9 +2358,10 @@ dependencies = [
[[package]]
name = "datafusion-sql"
-version = "21.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8e125d2ecf242b4f4b81f06839900dbb2037cc2a#8e125d2ecf242b4f4b81f06839900dbb2037cc2a"
+version = "21.1.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=21bf4ffccadfeea824ab6e29c0b872930d0e190a#21bf4ffccadfeea824ab6e29c0b872930d0e190a"
dependencies = [
+ "arrow",
"arrow-schema",
"datafusion-common",
"datafusion-expr",
diff --git a/Cargo.toml b/Cargo.toml
index 07ad3cee8edc..b10bc707335d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -58,12 +58,12 @@ arrow-schema = { version = "36.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
-datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
-datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
-datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8e125d2ecf242b4f4b81f06839900dbb2037cc2a" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "21bf4ffccadfeea824ab6e29c0b872930d0e190a" }
+datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "21bf4ffccadfeea824ab6e29c0b872930d0e190a" }
+datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "21bf4ffccadfeea824ab6e29c0b872930d0e190a" }
+datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "21bf4ffccadfeea824ab6e29c0b872930d0e190a" }
+datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "21bf4ffccadfeea824ab6e29c0b872930d0e190a" }
+datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "21bf4ffccadfeea824ab6e29c0b872930d0e190a" }
futures = "0.3"
futures-util = "0.3"
parquet = "36.0"
diff --git a/src/catalog/Cargo.toml b/src/catalog/Cargo.toml
index 8492ab44ef58..2bc100018740 100644
--- a/src/catalog/Cargo.toml
+++ b/src/catalog/Cargo.toml
@@ -7,6 +7,7 @@ license.workspace = true
[dependencies]
api = { path = "../api" }
arc-swap = "1.0"
+arrow-schema.workspace = true
async-stream.workspace = true
async-trait = "0.1"
backoff = { version = "0.4", features = ["tokio"] }
diff --git a/src/catalog/src/datafusion.rs b/src/catalog/src/datafusion.rs
new file mode 100644
index 000000000000..f54699455dbb
--- /dev/null
+++ b/src/catalog/src/datafusion.rs
@@ -0,0 +1,15 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod catalog_adapter;
diff --git a/src/query/src/datafusion/catalog_adapter.rs b/src/catalog/src/datafusion/catalog_adapter.rs
similarity index 89%
rename from src/query/src/datafusion/catalog_adapter.rs
rename to src/catalog/src/datafusion/catalog_adapter.rs
index a2e7ff527380..c83d5f187930 100644
--- a/src/query/src/datafusion/catalog_adapter.rs
+++ b/src/catalog/src/datafusion/catalog_adapter.rs
@@ -18,10 +18,6 @@ use std::any::Any;
use std::sync::Arc;
use async_trait::async_trait;
-use catalog::error::{self as catalog_error, Error};
-use catalog::{
- CatalogListRef, CatalogProvider, CatalogProviderRef, SchemaProvider, SchemaProviderRef,
-};
use common_error::prelude::BoxedError;
use datafusion::catalog::catalog::{
CatalogList as DfCatalogList, CatalogProvider as DfCatalogProvider,
@@ -33,7 +29,10 @@ use snafu::ResultExt;
use table::table::adapter::{DfTableProviderAdapter, TableAdapter};
use table::TableRef;
-use crate::datafusion::error;
+use crate::error::{self, Result, SchemaProviderOperationSnafu};
+use crate::{
+ CatalogListRef, CatalogProvider, CatalogProviderRef, SchemaProvider, SchemaProviderRef,
+};
pub struct DfCatalogListAdapter {
catalog_list: CatalogListRef,
@@ -89,7 +88,7 @@ impl CatalogProvider for CatalogProviderAdapter {
self
}
- fn schema_names(&self) -> catalog::error::Result<Vec<String>> {
+ fn schema_names(&self) -> Result<Vec<String>> {
Ok(self.df_catalog_provider.schema_names())
}
@@ -97,11 +96,11 @@ impl CatalogProvider for CatalogProviderAdapter {
&self,
_name: String,
_schema: SchemaProviderRef,
- ) -> catalog::error::Result<Option<SchemaProviderRef>> {
+ ) -> Result<Option<SchemaProviderRef>> {
todo!("register_schema is not supported in Datafusion catalog provider")
}
- fn schema(&self, name: &str) -> catalog::error::Result<Option<Arc<dyn SchemaProvider>>> {
+ fn schema(&self, name: &str) -> Result<Option<Arc<dyn SchemaProvider>>> {
Ok(self
.df_catalog_provider
.schema(name)
@@ -196,11 +195,11 @@ impl SchemaProvider for SchemaProviderAdapter {
}
/// Retrieves the list of available table names in this schema.
- fn table_names(&self) -> Result<Vec<String>, Error> {
+ fn table_names(&self) -> Result<Vec<String>> {
Ok(self.df_schema_provider.table_names())
}
- async fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
+ async fn table(&self, name: &str) -> Result<Option<TableRef>> {
let table = self.df_schema_provider.table(name).await;
let table = table.map(|table_provider| {
match table_provider
@@ -219,11 +218,7 @@ impl SchemaProvider for SchemaProviderAdapter {
Ok(table)
}
- fn register_table(
- &self,
- name: String,
- table: TableRef,
- ) -> catalog::error::Result<Option<TableRef>> {
+ fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>> {
let table_provider = Arc::new(DfTableProviderAdapter::new(table.clone()));
Ok(self
.df_schema_provider
@@ -232,43 +227,43 @@ impl SchemaProvider for SchemaProviderAdapter {
msg: "Fail to register table to datafusion",
})
.map_err(BoxedError::new)
- .context(catalog_error::SchemaProviderOperationSnafu)?
+ .context(SchemaProviderOperationSnafu)?
.map(|_| table))
}
- fn rename_table(&self, _name: &str, _new_name: String) -> catalog_error::Result<TableRef> {
+ fn rename_table(&self, _name: &str, _new_name: String) -> Result<TableRef> {
todo!()
}
- fn deregister_table(&self, name: &str) -> catalog::error::Result<Option<TableRef>> {
+ fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
self.df_schema_provider
.deregister_table(name)
.context(error::DatafusionSnafu {
msg: "Fail to deregister table from datafusion",
})
.map_err(BoxedError::new)
- .context(catalog_error::SchemaProviderOperationSnafu)?
+ .context(SchemaProviderOperationSnafu)?
.map(|table| {
let adapter = TableAdapter::new(table)
.context(error::TableSchemaMismatchSnafu)
.map_err(BoxedError::new)
- .context(catalog_error::SchemaProviderOperationSnafu)?;
+ .context(SchemaProviderOperationSnafu)?;
Ok(Arc::new(adapter) as _)
})
.transpose()
}
- fn table_exist(&self, name: &str) -> Result<bool, Error> {
+ fn table_exist(&self, name: &str) -> Result<bool> {
Ok(self.df_schema_provider.table_exist(name))
}
}
#[cfg(test)]
mod tests {
- use catalog::local::{new_memory_catalog_list, MemoryCatalogProvider, MemorySchemaProvider};
use table::table::numbers::NumbersTable;
use super::*;
+ use crate::local::{new_memory_catalog_list, MemoryCatalogProvider, MemorySchemaProvider};
#[test]
#[should_panic]
diff --git a/src/catalog/src/error.rs b/src/catalog/src/error.rs
index c9c46f20687f..36c1e9de39ff 100644
--- a/src/catalog/src/error.rs
+++ b/src/catalog/src/error.rs
@@ -117,6 +117,9 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Operation {} not supported", op))]
+ NotSupported { op: String, location: Location },
+
#[snafu(display("Failed to open table, table info: {}, source: {}", table_info, source))]
OpenTable {
table_info: String,
@@ -136,6 +139,12 @@ pub enum Error {
source: common_recordbatch::error::Error,
},
+ #[snafu(display("Failed to create recordbatch, source: {}", source))]
+ CreateRecordBatch {
+ #[snafu(backtrace)]
+ source: common_recordbatch::error::Error,
+ },
+
#[snafu(display(
"Failed to insert table creation record to system catalog, source: {}",
source
@@ -226,6 +235,19 @@ pub enum Error {
#[snafu(display("Invalid system table definition: {err_msg}"))]
InvalidSystemTableDef { err_msg: String, location: Location },
+
+ #[snafu(display("{}: {}", msg, source))]
+ Datafusion {
+ msg: String,
+ source: DataFusionError,
+ location: Location,
+ },
+
+ #[snafu(display("Table schema mismatch, source: {}", source))]
+ TableSchemaMismatch {
+ #[snafu(backtrace)]
+ source: table::error::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -247,7 +269,9 @@ impl ErrorExt for Error {
Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,
- Error::ReadSystemCatalog { source, .. } => source.status_code(),
+ Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source } => {
+ source.status_code()
+ }
Error::InvalidCatalogValue { source, .. } | Error::CatalogEntrySerde { source } => {
source.status_code()
}
@@ -264,7 +288,8 @@ impl ErrorExt for Error {
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. }
| Error::DeregisterTable { source, .. }
- | Error::RegionStats { source, .. } => source.status_code(),
+ | Error::RegionStats { source, .. }
+ | Error::TableSchemaMismatch { source } => source.status_code(),
Error::MetaSrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source } => source.status_code(),
@@ -274,8 +299,9 @@ impl ErrorExt for Error {
source.status_code()
}
- Error::Unimplemented { .. } => StatusCode::Unsupported,
+ Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
+ Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
}
}
diff --git a/src/catalog/src/information_schema.rs b/src/catalog/src/information_schema.rs
new file mode 100644
index 000000000000..f4d70ec6b961
--- /dev/null
+++ b/src/catalog/src/information_schema.rs
@@ -0,0 +1,80 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod tables;
+
+use std::any::Any;
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use datafusion::datasource::streaming::{PartitionStream, StreamingTable};
+use snafu::ResultExt;
+use table::table::adapter::TableAdapter;
+use table::TableRef;
+
+use crate::error::{DatafusionSnafu, Result, TableSchemaMismatchSnafu};
+use crate::information_schema::tables::InformationSchemaTables;
+use crate::{CatalogProviderRef, SchemaProvider};
+
+const TABLES: &str = "tables";
+
+pub(crate) struct InformationSchemaProvider {
+ catalog_name: String,
+ catalog_provider: CatalogProviderRef,
+}
+
+impl InformationSchemaProvider {
+ pub(crate) fn new(catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
+ Self {
+ catalog_name,
+ catalog_provider,
+ }
+ }
+}
+
+#[async_trait]
+impl SchemaProvider for InformationSchemaProvider {
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn table_names(&self) -> Result<Vec<String>> {
+ Ok(vec![TABLES.to_string()])
+ }
+
+ async fn table(&self, name: &str) -> Result<Option<TableRef>> {
+ let table = if name.eq_ignore_ascii_case(TABLES) {
+ Arc::new(InformationSchemaTables::new(
+ self.catalog_name.clone(),
+ self.catalog_provider.clone(),
+ ))
+ } else {
+ return Ok(None);
+ };
+
+ let table = Arc::new(
+ StreamingTable::try_new(table.schema().clone(), vec![table]).with_context(|_| {
+ DatafusionSnafu {
+ msg: format!("Failed to get InformationSchema table '{name}'"),
+ }
+ })?,
+ );
+ let table = TableAdapter::new(table).context(TableSchemaMismatchSnafu)?;
+ Ok(Some(Arc::new(table)))
+ }
+
+ fn table_exist(&self, name: &str) -> Result<bool> {
+ Ok(matches!(name.to_ascii_lowercase().as_str(), TABLES))
+ }
+}
diff --git a/src/catalog/src/information_schema/tables.rs b/src/catalog/src/information_schema/tables.rs
new file mode 100644
index 000000000000..311964c5aabc
--- /dev/null
+++ b/src/catalog/src/information_schema/tables.rs
@@ -0,0 +1,165 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use arrow_schema::SchemaRef as ArrowSchemaRef;
+use common_catalog::consts::INFORMATION_SCHEMA_NAME;
+use common_query::physical_plan::TaskContext;
+use common_recordbatch::RecordBatch;
+use datafusion::datasource::streaming::PartitionStream as DfPartitionStream;
+use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
+use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
+use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
+use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
+use datatypes::vectors::StringVectorBuilder;
+use snafu::ResultExt;
+use table::metadata::TableType;
+
+use crate::error::{CreateRecordBatchSnafu, Result};
+use crate::information_schema::TABLES;
+use crate::CatalogProviderRef;
+
+pub(super) struct InformationSchemaTables {
+ schema: SchemaRef,
+ catalog_name: String,
+ catalog_provider: CatalogProviderRef,
+}
+
+impl InformationSchemaTables {
+ pub(super) fn new(catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
+ let schema = Arc::new(Schema::new(vec![
+ ColumnSchema::new("table_catalog", ConcreteDataType::string_datatype(), false),
+ ColumnSchema::new("table_schema", ConcreteDataType::string_datatype(), false),
+ ColumnSchema::new("table_name", ConcreteDataType::string_datatype(), false),
+ ColumnSchema::new("table_type", ConcreteDataType::string_datatype(), false),
+ ]));
+ Self {
+ schema,
+ catalog_name,
+ catalog_provider,
+ }
+ }
+
+ fn builder(&self) -> InformationSchemaTablesBuilder {
+ InformationSchemaTablesBuilder::new(
+ self.schema.clone(),
+ self.catalog_name.clone(),
+ self.catalog_provider.clone(),
+ )
+ }
+}
+
+/// Builds the `information_schema.TABLE` table row by row
+///
+/// Columns are based on <https://www.postgresql.org/docs/current/infoschema-columns.html>
+struct InformationSchemaTablesBuilder {
+ schema: SchemaRef,
+ catalog_name: String,
+ catalog_provider: CatalogProviderRef,
+
+ catalog_names: StringVectorBuilder,
+ schema_names: StringVectorBuilder,
+ table_names: StringVectorBuilder,
+ table_types: StringVectorBuilder,
+}
+
+impl InformationSchemaTablesBuilder {
+ fn new(schema: SchemaRef, catalog_name: String, catalog_provider: CatalogProviderRef) -> Self {
+ Self {
+ schema,
+ catalog_name,
+ catalog_provider,
+ catalog_names: StringVectorBuilder::with_capacity(42),
+ schema_names: StringVectorBuilder::with_capacity(42),
+ table_names: StringVectorBuilder::with_capacity(42),
+ table_types: StringVectorBuilder::with_capacity(42),
+ }
+ }
+
+ /// Construct the `information_schema.tables` virtual table
+ async fn make_tables(&mut self) -> Result<RecordBatch> {
+ let catalog_name = self.catalog_name.clone();
+
+ for schema_name in self.catalog_provider.schema_names()? {
+ if schema_name == INFORMATION_SCHEMA_NAME {
+ continue;
+ }
+
+ let Some(schema) = self.catalog_provider.schema(&schema_name)? else { continue };
+ for table_name in schema.table_names()? {
+ let Some(table) = schema.table(&table_name).await? else { continue };
+ self.add_table(&catalog_name, &schema_name, &table_name, table.table_type());
+ }
+ }
+
+ // Add a final list for the information schema tables themselves
+ self.add_table(
+ &catalog_name,
+ INFORMATION_SCHEMA_NAME,
+ TABLES,
+ TableType::View,
+ );
+
+ self.finish()
+ }
+
+ fn add_table(
+ &mut self,
+ catalog_name: &str,
+ schema_name: &str,
+ table_name: &str,
+ table_type: TableType,
+ ) {
+ self.catalog_names.push(Some(catalog_name));
+ self.schema_names.push(Some(schema_name));
+ self.table_names.push(Some(table_name));
+ self.table_types.push(Some(match table_type {
+ TableType::Base => "BASE TABLE",
+ TableType::View => "VIEW",
+ TableType::Temporary => "LOCAL TEMPORARY",
+ }));
+ }
+
+ fn finish(&mut self) -> Result<RecordBatch> {
+ let columns: Vec<VectorRef> = vec![
+ Arc::new(self.catalog_names.finish()),
+ Arc::new(self.schema_names.finish()),
+ Arc::new(self.table_names.finish()),
+ Arc::new(self.table_types.finish()),
+ ];
+ RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
+ }
+}
+
+impl DfPartitionStream for InformationSchemaTables {
+ fn schema(&self) -> &ArrowSchemaRef {
+ self.schema.arrow_schema()
+ }
+
+ fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
+ let schema = self.schema().clone();
+ let mut builder = self.builder();
+ Box::pin(DfRecordBatchStreamAdapter::new(
+ schema,
+ futures::stream::once(async move {
+ builder
+ .make_tables()
+ .await
+ .map(|x| x.into_df_record_batch())
+ .map_err(Into::into)
+ }),
+ ))
+ }
+}
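Before the remaining files in this diff, a hedged sketch of what `InformationSchemaTablesBuilder` ultimately produces: a single Arrow RecordBatch whose string columns mirror the `table_catalog`/`table_schema`/`table_name`/`table_type` schema declared above. The example uses the `arrow` crate directly, whereas the project goes through its own `Schema` and `StringVectorBuilder` wrappers:

use std::sync::Arc;

use arrow::array::{ArrayRef, StringArray};
use arrow::datatypes::{DataType, Field, Schema};
use arrow::error::ArrowError;
use arrow::record_batch::RecordBatch;

fn make_tables_batch() -> Result<RecordBatch, ArrowError> {
    let schema = Arc::new(Schema::new(vec![
        Field::new("table_catalog", DataType::Utf8, false),
        Field::new("table_schema", DataType::Utf8, false),
        Field::new("table_name", DataType::Utf8, false),
        Field::new("table_type", DataType::Utf8, false),
    ]));

    // One entry per table; a real implementation walks the catalog provider
    // and appends a row for each table it finds.
    let catalogs: ArrayRef = Arc::new(StringArray::from(vec!["greptime", "greptime"]));
    let schemas: ArrayRef = Arc::new(StringArray::from(vec!["public", "information_schema"]));
    let names: ArrayRef = Arc::new(StringArray::from(vec!["demo", "tables"]));
    let types: ArrayRef = Arc::new(StringArray::from(vec!["BASE TABLE", "VIEW"]));

    RecordBatch::try_new(schema, vec![catalogs, schemas, names, types])
}

fn main() {
    let batch = make_tables_batch().expect("schema and columns agree");
    println!("{} rows, {} columns", batch.num_rows(), batch.num_columns());
}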
diff --git a/src/catalog/src/lib.rs b/src/catalog/src/lib.rs
index 84f70044c3fe..41d640049d7b 100644
--- a/src/catalog/src/lib.rs
+++ b/src/catalog/src/lib.rs
@@ -29,8 +29,10 @@ use table::TableRef;
use crate::error::{CreateTableSnafu, Result};
pub use crate::schema::{SchemaProvider, SchemaProviderRef};
+pub mod datafusion;
pub mod error;
pub mod helper;
+pub(crate) mod information_schema;
pub mod local;
pub mod remote;
pub mod schema;
diff --git a/src/catalog/src/local/manager.rs b/src/catalog/src/local/manager.rs
index ed3d528f451c..e546d90c7527 100644
--- a/src/catalog/src/local/manager.rs
+++ b/src/catalog/src/local/manager.rs
@@ -74,7 +74,7 @@ impl LocalCatalogManager {
})?;
let table = SystemCatalogTable::new(engine.clone()).await?;
let memory_catalog_list = crate::local::memory::new_memory_catalog_list()?;
- let system_catalog = Arc::new(SystemCatalog::new(table, memory_catalog_list.clone()));
+ let system_catalog = Arc::new(SystemCatalog::new(table));
Ok(Self {
system: system_catalog,
catalogs: memory_catalog_list,
@@ -305,9 +305,7 @@ impl CatalogList for LocalCatalogManager {
}
fn catalog_names(&self) -> Result<Vec<String>> {
- let mut res = self.catalogs.catalog_names()?;
- res.push(SYSTEM_CATALOG_NAME.to_string());
- Ok(res)
+ self.catalogs.catalog_names()
}
fn catalog(&self, name: &str) -> Result<Option<CatalogProviderRef>> {
diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs
index 5279a66ce3c4..5acb9b87b5f7 100644
--- a/src/catalog/src/remote/manager.rs
+++ b/src/catalog/src/remote/manager.rs
@@ -189,7 +189,9 @@ impl RemoteCatalogManager {
let max_table_id = MIN_USER_TABLE_ID - 1;
// initiate default catalog and schema
- let default_catalog = self.initiate_default_catalog().await?;
+ let default_catalog = self
+ .create_catalog_and_schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
+ .await?;
res.insert(DEFAULT_CATALOG_NAME.to_string(), default_catalog);
info!("Default catalog and schema registered");
@@ -269,13 +271,19 @@ impl RemoteCatalogManager {
Ok(())
}
- async fn initiate_default_catalog(&self) -> Result<CatalogProviderRef> {
- let default_catalog = self.new_catalog_provider(DEFAULT_CATALOG_NAME);
- let default_schema = self.new_schema_provider(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME);
- default_catalog.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema.clone())?;
+ pub async fn create_catalog_and_schema(
+ &self,
+ catalog_name: &str,
+ schema_name: &str,
+ ) -> Result<CatalogProviderRef> {
+ let schema_provider = self.new_schema_provider(catalog_name, schema_name);
+
+ let catalog_provider = self.new_catalog_provider(catalog_name);
+ catalog_provider.register_schema(schema_name.to_string(), schema_provider.clone())?;
+
let schema_key = SchemaKey {
- schema_name: DEFAULT_SCHEMA_NAME.to_string(),
- catalog_name: DEFAULT_CATALOG_NAME.to_string(),
+ catalog_name: catalog_name.to_string(),
+ schema_name: schema_name.to_string(),
}
.to_string();
self.backend
@@ -286,10 +294,10 @@ impl RemoteCatalogManager {
.context(InvalidCatalogValueSnafu)?,
)
.await?;
- info!("Registered default schema");
+ info!("Created schema '{schema_key}'");
let catalog_key = CatalogKey {
- catalog_name: DEFAULT_CATALOG_NAME.to_string(),
+ catalog_name: catalog_name.to_string(),
}
.to_string();
self.backend
@@ -300,8 +308,8 @@ impl RemoteCatalogManager {
.context(InvalidCatalogValueSnafu)?,
)
.await?;
- info!("Registered default catalog");
- Ok(default_catalog)
+ info!("Created catalog '{catalog_key}");
+ Ok(catalog_provider)
}
async fn open_or_create_table(
diff --git a/src/catalog/src/schema.rs b/src/catalog/src/schema.rs
index 1c9dd11744b4..9dcf329657d1 100644
--- a/src/catalog/src/schema.rs
+++ b/src/catalog/src/schema.rs
@@ -18,7 +18,7 @@ use std::sync::Arc;
use async_trait::async_trait;
use table::TableRef;
-use crate::error::Result;
+use crate::error::{NotSupportedSnafu, Result};
/// Represents a schema, comprising a number of named tables.
#[async_trait]
@@ -35,15 +35,30 @@ pub trait SchemaProvider: Sync + Send {
/// If supported by the implementation, adds a new table to this schema.
/// If a table of the same name existed before, it returns "Table already exists" error.
- fn register_table(&self, name: String, table: TableRef) -> Result<Option<TableRef>>;
+ fn register_table(&self, name: String, _table: TableRef) -> Result<Option<TableRef>> {
+ NotSupportedSnafu {
+ op: format!("register_table({name}, <table>)"),
+ }
+ .fail()
+ }
/// If supported by the implementation, renames an existing table from this schema and returns it.
/// If no table of that name exists, returns "Table not found" error.
- fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef>;
+ fn rename_table(&self, name: &str, new_name: String) -> Result<TableRef> {
+ NotSupportedSnafu {
+ op: format!("rename_table({name}, {new_name})"),
+ }
+ .fail()
+ }
/// If supported by the implementation, removes an existing table from this schema and returns it.
/// If no table of that name exists, returns Ok(None).
- fn deregister_table(&self, name: &str) -> Result<Option<TableRef>>;
+ fn deregister_table(&self, name: &str) -> Result<Option<TableRef>> {
+ NotSupportedSnafu {
+ op: format!("deregister_table({name})"),
+ }
+ .fail()
+ }
/// If supported by the implementation, checks the table exist in the schema provider or not.
/// If no matched table in the schema provider, return false.
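The `SchemaProvider` change above replaces required mutation methods with default bodies that fail with a `NotSupported` error, so read-only providers such as information_schema no longer have to implement or panic on them. A minimal sketch of that trait-default pattern, with a plain `String` error standing in for the catalog crate's `Error`:

use std::sync::Arc;

trait SchemaProvider {
    fn table_names(&self) -> Result<Vec<String>, String>;

    // Optional operation: the default body reports "not supported" instead of
    // forcing every implementor to write a panicking stub.
    fn register_table(&self, name: String) -> Result<(), String> {
        Err(format!("register_table({name}) is not supported by this provider"))
    }
}

struct ReadOnlySchema;

impl SchemaProvider for ReadOnlySchema {
    fn table_names(&self) -> Result<Vec<String>, String> {
        Ok(vec!["tables".to_string()])
    }
    // `register_table` is intentionally left to the default implementation.
}

fn main() {
    let schema: Arc<dyn SchemaProvider> = Arc::new(ReadOnlySchema);
    println!("{:?}", schema.table_names());
    println!("{:?}", schema.register_table("demo".to_string()));
}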
diff --git a/src/catalog/src/table_source.rs b/src/catalog/src/table_source.rs
index 0a7c9a5ca080..81a0840a4c52 100644
--- a/src/catalog/src/table_source.rs
+++ b/src/catalog/src/table_source.rs
@@ -15,6 +15,7 @@
use std::collections::HashMap;
use std::sync::Arc;
+use common_catalog::consts::INFORMATION_SCHEMA_NAME;
use common_catalog::format_full_table_name;
use datafusion::common::{ResolvedTableReference, TableReference};
use datafusion::datasource::provider_as_source;
@@ -26,6 +27,7 @@ use table::table::adapter::DfTableProviderAdapter;
use crate::error::{
CatalogNotFoundSnafu, QueryAccessDeniedSnafu, Result, SchemaNotFoundSnafu, TableNotExistSnafu,
};
+use crate::information_schema::InformationSchemaProvider;
use crate::CatalogListRef;
pub struct DfTableSourceProvider {
@@ -100,14 +102,25 @@ impl DfTableSourceProvider {
let schema_name = table_ref.schema.as_ref();
let table_name = table_ref.table.as_ref();
- let catalog = self
- .catalog_list
- .catalog(catalog_name)?
- .context(CatalogNotFoundSnafu { catalog_name })?;
- let schema = catalog.schema(schema_name)?.context(SchemaNotFoundSnafu {
- catalog: catalog_name,
- schema: schema_name,
- })?;
+ let schema = if schema_name != INFORMATION_SCHEMA_NAME {
+ let catalog = self
+ .catalog_list
+ .catalog(catalog_name)?
+ .context(CatalogNotFoundSnafu { catalog_name })?;
+ catalog.schema(schema_name)?.context(SchemaNotFoundSnafu {
+ catalog: catalog_name,
+ schema: schema_name,
+ })?
+ } else {
+ let catalog_provider = self
+ .catalog_list
+ .catalog(catalog_name)?
+ .context(CatalogNotFoundSnafu { catalog_name })?;
+ Arc::new(InformationSchemaProvider::new(
+ catalog_name.to_string(),
+ catalog_provider,
+ ))
+ };
let table = schema
.table(table_name)
.await?
diff --git a/src/catalog/src/tables.rs b/src/catalog/src/tables.rs
index 64e2c65b9751..826262130992 100644
--- a/src/catalog/src/tables.rs
+++ b/src/catalog/src/tables.rs
@@ -15,27 +15,12 @@
// The `tables` table in system catalog keeps a record of all tables created by user.
use std::any::Any;
-use std::pin::Pin;
use std::sync::Arc;
-use std::task::{Context, Poll};
-use async_stream::stream;
use async_trait::async_trait;
use common_catalog::consts::{INFORMATION_SCHEMA_NAME, SYSTEM_CATALOG_TABLE_NAME};
-use common_error::ext::BoxedError;
-use common_query::logical_plan::Expr;
-use common_query::physical_plan::PhysicalPlanRef;
-use common_recordbatch::error::Result as RecordBatchResult;
-use common_recordbatch::{RecordBatch, RecordBatchStream};
-use datatypes::prelude::{ConcreteDataType, DataType};
-use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
-use datatypes::value::ValueRef;
-use datatypes::vectors::VectorRef;
-use futures::Stream;
use snafu::ResultExt;
-use table::error::TablesRecordBatchSnafu;
-use table::metadata::{TableId, TableInfoRef};
-use table::table::scan::SimpleTableScan;
+use table::metadata::TableId;
use table::{Table, TableRef};
use crate::error::{self, Error, InsertCatalogRecordSnafu, Result as CatalogResult};
@@ -43,152 +28,9 @@ use crate::system::{
build_schema_insert_request, build_table_deletion_request, build_table_insert_request,
SystemCatalogTable,
};
-use crate::{
- CatalogListRef, CatalogProvider, DeregisterTableRequest, SchemaProvider, SchemaProviderRef,
-};
-
-/// Tables holds all tables created by user.
-pub struct Tables {
- schema: SchemaRef,
- catalogs: CatalogListRef,
-}
-
-impl Tables {
- pub fn new(catalogs: CatalogListRef) -> Self {
- Self {
- schema: Arc::new(build_schema_for_tables()),
- catalogs,
- }
- }
-}
-
-#[async_trait::async_trait]
-impl Table for Tables {
- fn as_any(&self) -> &dyn Any {
- self
- }
-
- fn schema(&self) -> SchemaRef {
- self.schema.clone()
- }
-
- fn table_info(&self) -> TableInfoRef {
- unreachable!("Tables does not support table_info method")
- }
-
- async fn scan(
- &self,
- _projection: Option<&Vec<usize>>,
- _filters: &[Expr],
- _limit: Option<usize>,
- ) -> table::error::Result<PhysicalPlanRef> {
- let catalogs = self.catalogs.clone();
- let schema_ref = self.schema.clone();
-
- let stream = stream!({
- for catalog_name in catalogs
- .catalog_names()
- .map_err(BoxedError::new)
- .context(TablesRecordBatchSnafu)?
- {
- let catalog = catalogs
- .catalog(&catalog_name)
- .map_err(BoxedError::new)
- .context(TablesRecordBatchSnafu)?
- .unwrap();
- for schema_name in catalog
- .schema_names()
- .map_err(BoxedError::new)
- .context(TablesRecordBatchSnafu)?
- {
- let schema = catalog
- .schema(&schema_name)
- .map_err(BoxedError::new)
- .context(TablesRecordBatchSnafu)?
- .unwrap();
- let names = schema
- .table_names()
- .map_err(BoxedError::new)
- .context(TablesRecordBatchSnafu)?;
- let mut tables = Vec::with_capacity(names.len());
-
- for name in names {
- let table = schema
- .table(&name)
- .await
- .map_err(BoxedError::new)
- .context(TablesRecordBatchSnafu)?
- .unwrap();
-
- tables.push(table);
- }
-
- let vec = tables_to_record_batch(&catalog_name, &schema_name, tables);
- let record_batch_res = RecordBatch::new(schema_ref.clone(), vec);
- yield record_batch_res;
- }
- }
- });
-
- let stream = Box::pin(TablesRecordBatchStream {
- schema: self.schema.clone(),
- stream: Box::pin(stream),
- });
- Ok(Arc::new(SimpleTableScan::new(stream)))
- }
-}
-
-/// Convert tables info to `RecordBatch`.
-fn tables_to_record_batch(
- catalog_name: &str,
- schema_name: &str,
- tables: Vec<TableRef>,
-) -> Vec<VectorRef> {
- let mut catalog_vec = ConcreteDataType::string_datatype().create_mutable_vector(tables.len());
- let mut schema_vec = ConcreteDataType::string_datatype().create_mutable_vector(tables.len());
- let mut table_name_vec =
- ConcreteDataType::string_datatype().create_mutable_vector(tables.len());
- let mut engine_vec = ConcreteDataType::string_datatype().create_mutable_vector(tables.len());
-
- for table in tables {
- let name = &table.table_info().name;
- let engine = &table.table_info().meta.engine;
- // Safety: All these vectors are string type.
- catalog_vec.push_value_ref(ValueRef::String(catalog_name));
- schema_vec.push_value_ref(ValueRef::String(schema_name));
- table_name_vec.push_value_ref(ValueRef::String(name));
- engine_vec.push_value_ref(ValueRef::String(engine));
- }
-
- vec![
- catalog_vec.to_vector(),
- schema_vec.to_vector(),
- table_name_vec.to_vector(),
- engine_vec.to_vector(),
- ]
-}
-
-pub struct TablesRecordBatchStream {
- schema: SchemaRef,
- stream: Pin<Box<dyn Stream<Item = RecordBatchResult<RecordBatch>> + Send>>,
-}
-
-impl Stream for TablesRecordBatchStream {
- type Item = RecordBatchResult<RecordBatch>;
-
- fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
- Pin::new(&mut self.stream).poll_next(cx)
- }
-}
-
-impl RecordBatchStream for TablesRecordBatchStream {
- fn schema(&self) -> SchemaRef {
- self.schema.clone()
- }
-}
+use crate::{CatalogProvider, DeregisterTableRequest, SchemaProvider, SchemaProviderRef};
pub struct InformationSchema {
- pub tables: Arc<Tables>,
pub system: Arc<SystemCatalogTable>,
}
@@ -199,41 +41,19 @@ impl SchemaProvider for InformationSchema {
}
fn table_names(&self) -> Result<Vec<String>, Error> {
- Ok(vec![
- "tables".to_string(),
- SYSTEM_CATALOG_TABLE_NAME.to_string(),
- ])
+ Ok(vec![SYSTEM_CATALOG_TABLE_NAME.to_string()])
}
async fn table(&self, name: &str) -> Result<Option<TableRef>, Error> {
- if name.eq_ignore_ascii_case("tables") {
- Ok(Some(self.tables.clone()))
- } else if name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME) {
+ if name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME) {
Ok(Some(self.system.clone()))
} else {
Ok(None)
}
}
- fn register_table(
- &self,
- _name: String,
- _table: TableRef,
- ) -> crate::error::Result<Option<TableRef>> {
- panic!("System catalog & schema does not support register table")
- }
-
- fn rename_table(&self, _name: &str, _new_name: String) -> crate::error::Result<TableRef> {
- unimplemented!("System catalog & schema does not support rename table")
- }
-
- fn deregister_table(&self, _name: &str) -> crate::error::Result<Option<TableRef>> {
- panic!("System catalog & schema does not support deregister table")
- }
-
fn table_exist(&self, name: &str) -> Result<bool, Error> {
- Ok(name.eq_ignore_ascii_case("tables")
- || name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME))
+ Ok(name.eq_ignore_ascii_case(SYSTEM_CATALOG_TABLE_NAME))
}
}
@@ -242,9 +62,8 @@ pub struct SystemCatalog {
}
impl SystemCatalog {
- pub fn new(system: SystemCatalogTable, catalogs: CatalogListRef) -> Self {
+ pub(crate) fn new(system: SystemCatalogTable) -> Self {
let schema = InformationSchema {
- tables: Arc::new(Tables::new(catalogs)),
system: Arc::new(system),
};
Self {
@@ -322,107 +141,3 @@ impl CatalogProvider for SystemCatalog {
}
}
}
-
-fn build_schema_for_tables() -> Schema {
- let cols = vec![
- ColumnSchema::new(
- "catalog".to_string(),
- ConcreteDataType::string_datatype(),
- false,
- ),
- ColumnSchema::new(
- "schema".to_string(),
- ConcreteDataType::string_datatype(),
- false,
- ),
- ColumnSchema::new(
- "table_name".to_string(),
- ConcreteDataType::string_datatype(),
- false,
- ),
- ColumnSchema::new(
- "engine".to_string(),
- ConcreteDataType::string_datatype(),
- false,
- ),
- ];
- Schema::new(cols)
-}
-
-#[cfg(test)]
-mod tests {
- use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
- use common_query::physical_plan::SessionContext;
- use futures_util::StreamExt;
- use table::table::numbers::NumbersTable;
-
- use super::*;
- use crate::local::memory::new_memory_catalog_list;
- use crate::CatalogList;
-
- #[tokio::test]
- async fn test_tables() {
- let catalog_list = new_memory_catalog_list().unwrap();
- let schema = catalog_list
- .catalog(DEFAULT_CATALOG_NAME)
- .unwrap()
- .unwrap()
- .schema(DEFAULT_SCHEMA_NAME)
- .unwrap()
- .unwrap();
- schema
- .register_table(
- "test_table".to_string(),
- Arc::new(NumbersTable::with_name(1, "test_table".to_string())),
- )
- .unwrap();
-
- let tables = Tables::new(catalog_list);
- let tables_stream = tables.scan(None, &[], None).await.unwrap();
- let session_ctx = SessionContext::new();
- let mut tables_stream = tables_stream.execute(0, session_ctx.task_ctx()).unwrap();
-
- if let Some(t) = tables_stream.next().await {
- let batch = t.unwrap();
- assert_eq!(1, batch.num_rows());
- assert_eq!(4, batch.num_columns());
- assert_eq!(
- ConcreteDataType::string_datatype(),
- batch.column(0).data_type()
- );
- assert_eq!(
- ConcreteDataType::string_datatype(),
- batch.column(1).data_type()
- );
- assert_eq!(
- ConcreteDataType::string_datatype(),
- batch.column(2).data_type()
- );
- assert_eq!(
- ConcreteDataType::string_datatype(),
- batch.column(3).data_type()
- );
- assert_eq!(
- "greptime",
- batch.column(0).get_ref(0).as_string().unwrap().unwrap()
- );
-
- assert_eq!(
- "public",
- batch.column(1).get_ref(0).as_string().unwrap().unwrap()
- );
-
- assert_eq!(
- "test_table",
- batch.column(2).get_ref(0).as_string().unwrap().unwrap()
- );
-
- assert_eq!(
- "test_engine",
- batch.column(3).get_ref(0).as_string().unwrap().unwrap()
- );
- } else {
- panic!("Record batch should not be empty!")
- }
- }
-}
diff --git a/src/common/substrait/src/df_logical.rs b/src/common/substrait/src/df_logical.rs
index a3956fc02734..faa426d338e1 100644
--- a/src/common/substrait/src/df_logical.rs
+++ b/src/common/substrait/src/df_logical.rs
@@ -398,7 +398,6 @@ impl DFLogicalSubstraitConvertor {
| LogicalPlan::CreateCatalog(_)
| LogicalPlan::DropView(_)
| LogicalPlan::Distinct(_)
- | LogicalPlan::SetVariable(_)
| LogicalPlan::CreateExternalTable(_)
| LogicalPlan::CreateMemoryTable(_)
| LogicalPlan::DropTable(_)
@@ -409,7 +408,8 @@ impl DFLogicalSubstraitConvertor {
| LogicalPlan::Prepare(_)
| LogicalPlan::Dml(_)
| LogicalPlan::DescribeTable(_)
- | LogicalPlan::Unnest(_) => InvalidParametersSnafu {
+ | LogicalPlan::Unnest(_)
+ | LogicalPlan::Statement(_) => InvalidParametersSnafu {
reason: format!(
"Trying to convert DDL/DML plan to substrait proto, plan: {plan:?}",
),
diff --git a/src/frontend/src/catalog.rs b/src/frontend/src/catalog.rs
index d54c9e72ac95..af35d1d45967 100644
--- a/src/frontend/src/catalog.rs
+++ b/src/frontend/src/catalog.rs
@@ -413,22 +413,6 @@ impl SchemaProvider for FrontendSchemaProvider {
Ok(Some(table))
}
- fn register_table(
- &self,
- _name: String,
- _table: TableRef,
- ) -> catalog::error::Result<Option<TableRef>> {
- unimplemented!("Frontend schema provider does not support register table")
- }
-
- fn rename_table(&self, _name: &str, _new_name: String) -> catalog_err::Result<TableRef> {
- unimplemented!("Frontend schema provider does not support rename table")
- }
-
- fn deregister_table(&self, _name: &str) -> catalog::error::Result<Option<TableRef>> {
- unimplemented!("Frontend schema provider does not support deregister table")
- }
-
fn table_exist(&self, name: &str) -> catalog::error::Result<bool> {
Ok(self.table_names()?.contains(&name.to_string()))
}
diff --git a/src/frontend/src/tests.rs b/src/frontend/src/tests.rs
index ccd2d9e3cd74..e9e88648aafd 100644
--- a/src/frontend/src/tests.rs
+++ b/src/frontend/src/tests.rs
@@ -20,7 +20,9 @@ use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
-use catalog::remote::MetaKvBackend;
+use catalog::local::{MemoryCatalogProvider, MemorySchemaProvider};
+use catalog::remote::{MetaKvBackend, RemoteCatalogManager};
+use catalog::CatalogProvider;
use client::Client;
use common_grpc::channel_manager::ChannelManager;
use common_runtime::Builder as RuntimeBuilder;
@@ -87,6 +89,20 @@ pub(crate) async fn create_standalone_instance(test_name: &str) -> MockStandalon
let frontend_instance = Instance::try_new_standalone(dn_instance.clone())
.await
.unwrap();
+
+ // create another catalog and schema for testing
+ let another_catalog = Arc::new(MemoryCatalogProvider::new());
+ let _ = another_catalog
+ .register_schema(
+ "another_schema".to_string(),
+ Arc::new(MemorySchemaProvider::new()),
+ )
+ .unwrap();
+ let _ = dn_instance
+ .catalog_manager()
+ .register_catalog("another_catalog".to_string(), another_catalog)
+ .unwrap();
+
dn_instance.start().await.unwrap();
MockStandaloneInstance {
instance: Arc::new(frontend_instance),
@@ -209,6 +225,16 @@ async fn create_distributed_datanode(
);
instance.start().await.unwrap();
+ // create another catalog and schema for testing
+ let _ = instance
+ .catalog_manager()
+ .as_any()
+ .downcast_ref::<RemoteCatalogManager>()
+ .unwrap()
+ .create_catalog_and_schema("another_catalog", "another_schema")
+ .await
+ .unwrap();
+
(
instance,
TestGuard {
diff --git a/src/frontend/src/tests/instance_test.rs b/src/frontend/src/tests/instance_test.rs
index e2bb4083e6ee..26e36dad0e94 100644
--- a/src/frontend/src/tests/instance_test.rs
+++ b/src/frontend/src/tests/instance_test.rs
@@ -15,7 +15,7 @@
use std::env;
use std::sync::Arc;
-use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_query::Output;
use common_recordbatch::util;
use common_telemetry::logging;
@@ -23,9 +23,9 @@ use datatypes::vectors::{Int64Vector, StringVector, UInt64Vector, VectorRef};
use rstest::rstest;
use rstest_reuse::apply;
use servers::query_handler::sql::SqlQueryHandler;
-use session::context::QueryContext;
+use session::context::{QueryContext, QueryContextRef};
-use crate::error::Error;
+use crate::error::{Error, Result};
use crate::instance::Instance;
use crate::tests::test_util::{
both_instances_cases, check_output_stream, check_unordered_output_stream, distributed,
@@ -246,8 +246,7 @@ async fn test_execute_insert_by_select(instance: Arc<dyn MockInstance>) {
+-------+------+--------+---------------------+
| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
| host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 |
-+-------+------+--------+---------------------+"
- .to_string();
++-------+------+--------+---------------------+";
check_output_stream(output, expected).await;
}
@@ -451,51 +450,58 @@ async fn test_rename_table(instance: Arc<dyn MockInstance>) {
let output = execute_sql(&instance, "create database db").await;
assert!(matches!(output, Output::AffectedRows(1)));
- let output = execute_sql_in_db(
+ let query_ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, "db"));
+ let output = execute_sql_with(
&instance,
"create table demo(host string, cpu double, memory double, ts timestamp, time index(ts))",
- "db",
+ query_ctx.clone(),
)
.await;
assert!(matches!(output, Output::AffectedRows(0)));
// make sure table insertion is ok before altering table name
- let output = execute_sql_in_db(
+ let output = execute_sql_with(
&instance,
"insert into demo(host, cpu, memory, ts) values ('host1', 1.1, 100, 1000), ('host2', 2.2, 200, 2000)",
- "db",
+ query_ctx.clone(),
)
.await;
assert!(matches!(output, Output::AffectedRows(2)));
// rename table
- let output = execute_sql_in_db(&instance, "alter table demo rename test_table", "db").await;
+ let output = execute_sql_with(
+ &instance,
+ "alter table demo rename test_table",
+ query_ctx.clone(),
+ )
+ .await;
assert!(matches!(output, Output::AffectedRows(0)));
- let output = execute_sql_in_db(&instance, "show tables", "db").await;
+ let output = execute_sql_with(&instance, "show tables", query_ctx.clone()).await;
let expect = "\
+------------+
| Tables |
+------------+
| test_table |
-+------------+\
-"
- .to_string();
++------------+";
check_output_stream(output, expect).await;
- let output = execute_sql_in_db(&instance, "select * from test_table order by ts", "db").await;
+ let output = execute_sql_with(
+ &instance,
+ "select * from test_table order by ts",
+ query_ctx.clone(),
+ )
+ .await;
let expected = "\
+-------+-----+--------+---------------------+
| host | cpu | memory | ts |
+-------+-----+--------+---------------------+
| host1 | 1.1 | 100.0 | 1970-01-01T00:00:01 |
| host2 | 2.2 | 200.0 | 1970-01-01T00:00:02 |
-+-------+-----+--------+---------------------+\
-"
- .to_string();
++-------+-----+--------+---------------------+";
check_output_stream(output, expected).await;
- try_execute_sql_in_db(&instance, "select * from demo", "db")
+ try_execute_sql_with(&instance, "select * from demo", query_ctx)
.await
.expect_err("no table found in expect");
}
@@ -510,30 +516,31 @@ async fn test_create_table_after_rename_table(instance: Arc<dyn MockInstance>) {
// create test table
let table_name = "demo";
- let output = execute_sql_in_db(
+ let query_ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, "db"));
+ let output = execute_sql_with(
&instance,
&format!("create table {table_name}(host string, cpu double, memory double, ts timestamp, time index(ts))"),
- "db",
+ query_ctx.clone(),
)
.await;
assert!(matches!(output, Output::AffectedRows(0)));
// rename table
let new_table_name = "test_table";
- let output = execute_sql_in_db(
+ let output = execute_sql_with(
&instance,
&format!("alter table {table_name} rename {new_table_name}"),
- "db",
+ query_ctx.clone(),
)
.await;
assert!(matches!(output, Output::AffectedRows(0)));
// create table with same name
// create test table
- let output = execute_sql_in_db(
+ let output = execute_sql_with(
&instance,
&format!("create table {table_name}(host string, cpu double, memory double, ts timestamp, time index(ts))"),
- "db",
+ query_ctx.clone(),
)
.await;
assert!(matches!(output, Output::AffectedRows(0)));
@@ -544,10 +551,8 @@ async fn test_create_table_after_rename_table(instance: Arc<dyn MockInstance>) {
+------------+
| demo |
| test_table |
-+------------+\
-"
- .to_string();
- let output = execute_sql_in_db(&instance, "show tables", "db").await;
++------------+";
+ let output = execute_sql_with(&instance, "show tables", query_ctx).await;
check_output_stream(output, expect).await;
}
@@ -594,9 +599,7 @@ async fn test_alter_table(instance: Arc<dyn MockInstance>) {
| host1 | 1.1 | 100.0 | 1970-01-01T00:00:01 | |
| host2 | 2.2 | 200.0 | 1970-01-01T00:00:02 | hello |
| host3 | 3.3 | 300.0 | 1970-01-01T00:00:03 | |
-+-------+-----+--------+---------------------+--------+\
- "
- .to_string();
++-------+-----+--------+---------------------+--------+";
check_output_stream(output, expected).await;
// Drop a column
@@ -611,9 +614,7 @@ async fn test_alter_table(instance: Arc<dyn MockInstance>) {
| host1 | 1.1 | 1970-01-01T00:00:01 | |
| host2 | 2.2 | 1970-01-01T00:00:02 | hello |
| host3 | 3.3 | 1970-01-01T00:00:03 | |
-+-------+-----+---------------------+--------+\
- "
- .to_string();
++-------+-----+---------------------+--------+";
check_output_stream(output, expected).await;
// insert a new row
@@ -633,9 +634,7 @@ async fn test_alter_table(instance: Arc<dyn MockInstance>) {
| host2 | 2.2 | 1970-01-01T00:00:02 | hello |
| host3 | 3.3 | 1970-01-01T00:00:03 | |
| host4 | 400.0 | 1970-01-01T00:00:04 | world |
-+-------+-------+---------------------+--------+\
- "
- .to_string();
++-------+-------+---------------------+--------+";
check_output_stream(output, expected).await;
}
@@ -676,9 +675,7 @@ async fn test_insert_with_default_value_for_type(instance: Arc<Instance>, type_n
+-------+-----+
| host1 | 1.1 |
| host2 | 2.2 |
-+-------+-----+\
- "
- .to_string();
++-------+-----+";
check_output_stream(output, expected).await;
}
@@ -697,42 +694,39 @@ async fn test_use_database(instance: Arc<dyn MockInstance>) {
let output = execute_sql(&instance, "create database db1").await;
assert!(matches!(output, Output::AffectedRows(1)));
- let output = execute_sql_in_db(
+ let query_ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, "db1"));
+ let output = execute_sql_with(
&instance,
"create table tb1(col_i32 int, ts bigint, TIME INDEX(ts))",
- "db1",
+ query_ctx.clone(),
)
.await;
assert!(matches!(output, Output::AffectedRows(0)));
- let output = execute_sql_in_db(&instance, "show tables", "db1").await;
+ let output = execute_sql_with(&instance, "show tables", query_ctx.clone()).await;
let expected = "\
+--------+
| Tables |
+--------+
| tb1 |
-+--------+\
- "
- .to_string();
++--------+";
check_output_stream(output, expected).await;
- let output = execute_sql_in_db(
+ let output = execute_sql_with(
&instance,
r#"insert into tb1(col_i32, ts) values (1, 1655276557000)"#,
- "db1",
+ query_ctx.clone(),
)
.await;
assert!(matches!(output, Output::AffectedRows(1)));
- let output = execute_sql_in_db(&instance, "select col_i32 from tb1", "db1").await;
+ let output = execute_sql_with(&instance, "select col_i32 from tb1", query_ctx.clone()).await;
let expected = "\
+---------+
| col_i32 |
+---------+
| 1 |
-+---------+\
- "
- .to_string();
++---------+";
check_output_stream(output, expected).await;
// Making a particular database the default by means of the USE statement does not preclude
@@ -743,9 +737,7 @@ async fn test_use_database(instance: Arc<dyn MockInstance>) {
| number |
+--------+
| 0 |
-+--------+\
- "
- .to_string();
++--------+";
check_output_stream(output, expected).await;
}
@@ -793,9 +785,7 @@ async fn test_delete(instance: Arc<dyn MockInstance>) {
+-------+---------------------+------+--------+
| host2 | 2022-06-15T07:02:38 | 77.7 | 2048.0 |
| host3 | 2022-06-15T07:02:39 | 88.8 | 3072.0 |
-+-------+---------------------+------+--------+\
-"
- .to_string();
++-------+---------------------+------+--------+";
check_output_stream(output, expect).await;
}
@@ -929,34 +919,82 @@ async fn test_execute_copy_from_s3(instance: Arc<dyn MockInstance>) {
+-------+------+--------+---------------------+
| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
| host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 |
-+-------+------+--------+---------------------+"
- .to_string();
++-------+------+--------+---------------------+";
check_output_stream(output, expected).await;
}
}
}
}
+#[apply(both_instances_cases)]
+async fn test_information_schema(instance: Arc<dyn MockInstance>) {
+ let is_distributed_mode = instance.is_distributed_mode();
+
+ let instance = instance.frontend();
+
+ let sql = "create table another_table(i bigint time index)";
+ let query_ctx = Arc::new(QueryContext::with("another_catalog", "another_schema"));
+ let output = execute_sql_with(&instance, sql, query_ctx.clone()).await;
+ assert!(matches!(output, Output::AffectedRows(0)));
+
+ // User can only see information schema under current catalog.
+ // A necessary requirement to GreptimeCloud.
+ let sql = "select table_catalog, table_schema, table_name, table_type from information_schema.tables where table_type != 'SYSTEM VIEW' order by table_name";
+
+ let output = execute_sql(&instance, sql).await;
+ let expected = if is_distributed_mode {
+ "\
++---------------+--------------------+------------+------------+
+| table_catalog | table_schema | table_name | table_type |
++---------------+--------------------+------------+------------+
+| greptime | public | scripts | BASE TABLE |
+| greptime | information_schema | tables | VIEW |
++---------------+--------------------+------------+------------+"
+ } else {
+ "\
++---------------+--------------------+------------+------------+
+| table_catalog | table_schema | table_name | table_type |
++---------------+--------------------+------------+------------+
+| greptime | public | numbers | BASE TABLE |
+| greptime | public | scripts | BASE TABLE |
+| greptime | information_schema | tables | VIEW |
++---------------+--------------------+------------+------------+"
+ };
+ check_output_stream(output, expected).await;
+
+ let output = execute_sql_with(&instance, sql, query_ctx).await;
+ let expected = "\
++-----------------+--------------------+---------------+------------+
+| table_catalog | table_schema | table_name | table_type |
++-----------------+--------------------+---------------+------------+
+| another_catalog | another_schema | another_table | BASE TABLE |
+| another_catalog | information_schema | tables | VIEW |
++-----------------+--------------------+---------------+------------+";
+ check_output_stream(output, expected).await;
+}
+
async fn execute_sql(instance: &Arc<Instance>, sql: &str) -> Output {
- execute_sql_in_db(instance, sql, DEFAULT_SCHEMA_NAME).await
+ execute_sql_with(instance, sql, QueryContext::arc()).await
}
-async fn try_execute_sql(
- instance: &Arc<Instance>,
- sql: &str,
-) -> Result<Output, crate::error::Error> {
- try_execute_sql_in_db(instance, sql, DEFAULT_SCHEMA_NAME).await
+async fn try_execute_sql(instance: &Arc<Instance>, sql: &str) -> Result<Output> {
+ try_execute_sql_with(instance, sql, QueryContext::arc()).await
}
-async fn try_execute_sql_in_db(
+async fn try_execute_sql_with(
instance: &Arc<Instance>,
sql: &str,
- db: &str,
-) -> Result<Output, crate::error::Error> {
- let query_ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, db));
+ query_ctx: QueryContextRef,
+) -> Result<Output> {
instance.do_query(sql, query_ctx).await.remove(0)
}
-async fn execute_sql_in_db(instance: &Arc<Instance>, sql: &str, db: &str) -> Output {
- try_execute_sql_in_db(instance, sql, db).await.unwrap()
+async fn execute_sql_with(
+ instance: &Arc<Instance>,
+ sql: &str,
+ query_ctx: QueryContextRef,
+) -> Output {
+ try_execute_sql_with(instance, sql, query_ctx)
+ .await
+ .unwrap()
}
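
The refactor above replaces the bare `db: &str` parameter with an explicit `QueryContextRef`. A minimal sketch of the resulting helper pattern, assuming `QueryContextRef` is the usual `Arc<QueryContext>` alias implied by the diff; the helper name `db_context` is illustrative:

```rust
use std::sync::Arc;

use session::context::{QueryContext, QueryContextRef};

// Build one context per target catalog/schema and clone it across statements,
// instead of threading a database name through every test helper.
fn db_context(catalog: &str, schema: &str) -> QueryContextRef {
    // QueryContext::with(catalog, schema) is the constructor used in the tests above.
    Arc::new(QueryContext::with(catalog, schema))
}
```

Call sites can then reuse the same context for every statement aimed at that schema, as the rewritten tests do with `query_ctx.clone()`.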
diff --git a/src/frontend/src/tests/test_util.rs b/src/frontend/src/tests/test_util.rs
index 534e09709e60..c40e2752f241 100644
--- a/src/frontend/src/tests/test_util.rs
+++ b/src/frontend/src/tests/test_util.rs
@@ -87,14 +87,14 @@ pub(crate) fn standalone_instance_case(
) {
}
-pub(crate) async fn check_output_stream(output: Output, expected: String) {
+pub(crate) async fn check_output_stream(output: Output, expected: &str) {
let recordbatches = match output {
Output::Stream(stream) => util::collect_batches(stream).await.unwrap(),
Output::RecordBatches(recordbatches) => recordbatches,
_ => unreachable!(),
};
let pretty_print = recordbatches.pretty_print().unwrap();
- assert_eq!(pretty_print, expected, "{}", pretty_print);
+ assert_eq!(pretty_print, expected, "actual: \n{}", pretty_print);
}
pub(crate) async fn check_unordered_output_stream(output: Output, expected: &str) {
diff --git a/src/meta-srv/src/mocks.rs b/src/meta-srv/src/mocks.rs
index db38a15b35b9..fcd08c5c6a05 100644
--- a/src/meta-srv/src/mocks.rs
+++ b/src/meta-srv/src/mocks.rs
@@ -13,6 +13,7 @@
// limitations under the License.
use std::sync::Arc;
+use std::time::Duration;
use api::v1::meta::heartbeat_server::HeartbeatServer;
use api::v1::meta::router_server::RouterServer;
@@ -73,7 +74,10 @@ pub async fn mock(
.await
});
- let config = ChannelConfig::new();
+ let config = ChannelConfig::new()
+ .timeout(Duration::from_secs(1))
+ .connect_timeout(Duration::from_secs(1))
+ .tcp_nodelay(true);
let channel_manager = ChannelManager::with_config(config);
// Move client to an option so we can _move_ the inner value
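
For context on the mock change above, a small sketch of building the same channel configuration; it assumes `ChannelConfig` is exported from `common_grpc::channel_manager` alongside `ChannelManager`, and the function name is illustrative:

```rust
use std::time::Duration;

use common_grpc::channel_manager::{ChannelConfig, ChannelManager};

// Tight connect/request timeouts keep the mock meta-srv tests from hanging on a dead peer.
fn short_timeout_channel_manager() -> ChannelManager {
    let config = ChannelConfig::new()
        .timeout(Duration::from_secs(1))
        .connect_timeout(Duration::from_secs(1))
        .tcp_nodelay(true);
    ChannelManager::with_config(config)
}
```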
diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs
index 8e147a26b74d..f17cef3634b8 100644
--- a/src/query/src/datafusion.rs
+++ b/src/query/src/datafusion.rs
@@ -14,7 +14,6 @@
//! Planner, QueryEngine implementations based on DataFusion.
-mod catalog_adapter;
mod error;
mod planner;
@@ -22,6 +21,7 @@ use std::collections::HashMap;
use std::sync::Arc;
use async_trait::async_trait;
+pub use catalog::datafusion::catalog_adapter::DfCatalogListAdapter;
use common_error::prelude::BoxedError;
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
use common_function::scalars::udf::create_udf;
@@ -44,7 +44,6 @@ use snafu::{ensure, OptionExt, ResultExt};
use table::requests::{DeleteRequest, InsertRequest};
use table::TableRef;
-pub use crate::datafusion::catalog_adapter::DfCatalogListAdapter;
pub use crate::datafusion::planner::DfContextProviderAdapter;
use crate::error::{
CatalogNotFoundSnafu, CatalogSnafu, CreateRecordBatchSnafu, DataFusionSnafu,
diff --git a/src/query/src/datafusion/error.rs b/src/query/src/datafusion/error.rs
index 2f52ba8b4ade..569f7d4790e1 100644
--- a/src/query/src/datafusion/error.rs
+++ b/src/query/src/datafusion/error.rs
@@ -38,12 +38,6 @@ pub enum InnerError {
source: datatypes::error::Error,
},
- #[snafu(display("Failed to convert table schema, source: {}", source))]
- TableSchemaMismatch {
- #[snafu(backtrace)]
- source: table::error::Error,
- },
-
#[snafu(display(
"Failed to convert DataFusion's recordbatch stream, source: {}",
source
@@ -67,10 +61,7 @@ impl ErrorExt for InnerError {
match self {
// TODO(yingwen): Further categorize datafusion error.
Datafusion { .. } => StatusCode::EngineExecuteQuery,
- // This downcast should not fail in usual case.
- PhysicalPlanDowncast { .. } | ConvertSchema { .. } | TableSchemaMismatch { .. } => {
- StatusCode::Unexpected
- }
+ PhysicalPlanDowncast { .. } | ConvertSchema { .. } => StatusCode::Unexpected,
ConvertDfRecordBatchStream { source } => source.status_code(),
ExecutePhysicalPlan { source } => source.status_code(),
}
diff --git a/src/query/src/optimizer.rs b/src/query/src/optimizer.rs
index ef7745ccdb88..a02108827e7c 100644
--- a/src/query/src/optimizer.rs
+++ b/src/query/src/optimizer.rs
@@ -93,7 +93,6 @@ impl OptimizerRule for TypeConversionRule {
| LogicalPlan::DropView { .. }
| LogicalPlan::Distinct { .. }
| LogicalPlan::Values { .. }
- | LogicalPlan::SetVariable { .. }
| LogicalPlan::Analyze { .. } => {
let inputs = plan.inputs();
let mut new_inputs = Vec::with_capacity(inputs.len());
@@ -120,7 +119,8 @@ impl OptimizerRule for TypeConversionRule {
| LogicalPlan::Prepare(_)
| LogicalPlan::Dml(_)
| LogicalPlan::DescribeTable(_)
- | LogicalPlan::Unnest(_) => Ok(Some(plan.clone())),
+ | LogicalPlan::Unnest(_)
+ | LogicalPlan::Statement(_) => Ok(Some(plan.clone())),
}
}
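
The optimizer hunk above has to grow a `LogicalPlan::Statement(_)` arm because the match is exhaustive. A toy illustration of that forcing function, using a stand-in enum rather than DataFusion's real `LogicalPlan`:

```rust
enum Plan {
    Projection,
    Unnest,
    // Newly added variant: every exhaustive match must now handle it,
    // which is what surfaces changes like the optimizer hunk above.
    Statement,
}

fn passes_through_unchanged(plan: &Plan) -> bool {
    match plan {
        Plan::Projection => false,
        Plan::Unnest | Plan::Statement => true,
    }
}

fn main() {
    assert!(passes_through_unchanged(&Plan::Statement));
}
```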
diff --git a/src/query/src/tests/mean_test.rs b/src/query/src/tests/mean_test.rs
index 5ae9a0a605d0..604ad33f6774 100644
--- a/src/query/src/tests/mean_test.rs
+++ b/src/query/src/tests/mean_test.rs
@@ -18,7 +18,6 @@ use datatypes::for_all_primitive_types;
use datatypes::prelude::*;
use datatypes::types::WrapperType;
use datatypes::value::OrderedFloat;
-use format_num::NumberFormat;
use num_traits::AsPrimitive;
use crate::error::Result;
@@ -56,14 +55,12 @@ where
let numbers =
function::get_numbers_from_table::<T>(column_name, table_name, engine.clone()).await;
- let expected_value = numbers.iter().map(|&n| n.as_()).collect::<Vec<f64>>();
-
- let expected_value = inc_stats::mean(expected_value.iter().cloned()).unwrap();
- if let Value::Float64(OrderedFloat(value)) = value {
- let num = NumberFormat::new();
- let value = num.format(".6e", value);
- let expected_value = num.format(".6e", expected_value);
- assert_eq!(value, expected_value);
- }
+ let numbers = numbers.iter().map(|&n| n.as_()).collect::<Vec<f64>>();
+ let expected = numbers.iter().sum::<f64>() / (numbers.len() as f64);
+ let Value::Float64(OrderedFloat(value)) = value else { unreachable!() };
+ assert!(
+ (value - expected).abs() < 1e-3,
+ "expected {expected}, actual {value}"
+ );
Ok(())
}
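
The rewritten mean test above drops the `format_num`-based string comparison in favor of a plain tolerance check. A self-contained sketch of that assertion style, standard library only, with illustrative values:

```rust
fn assert_close(value: f64, expected: f64, eps: f64) {
    assert!(
        (value - expected).abs() < eps,
        "expected {expected}, actual {value}"
    );
}

fn main() {
    let numbers = [3.0_f64, 4.0, 5.0];
    // Mean computed the same way as the test: sum divided by length.
    let expected = numbers.iter().sum::<f64>() / (numbers.len() as f64);
    assert_close(4.0, expected, 1e-3);
}
```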
diff --git a/src/script/src/python/pyo3/builtins.rs b/src/script/src/python/pyo3/builtins.rs
index 514b7e729ce5..8d9609421c69 100644
--- a/src/script/src/python/pyo3/builtins.rs
+++ b/src/script/src/python/pyo3/builtins.rs
@@ -279,8 +279,7 @@ fn sqrt(py: Python<'_>, val: PyObject) -> PyResult<PyObject> {
```
*/
bind_call_unary_math_function!(
- sqrt, sin, cos, tan, asin, acos, atan, floor, ceil, round, trunc, abs, signum, exp, ln, log2,
- log10
+ sqrt, sin, cos, tan, asin, acos, atan, floor, ceil, trunc, abs, signum, exp, ln, log2, log10
);
/// return a random vector range from 0 to 1 and length of len
@@ -296,6 +295,15 @@ fn random(py: Python<'_>, len: usize) -> PyResult<PyObject> {
columnar_value_to_py_any(py, res)
}
+#[pyfunction]
+fn round(py: Python<'_>, val: PyObject) -> PyResult<PyObject> {
+ let value = try_into_columnar_value(py, val)?;
+ let array = value.into_array(1);
+ let result =
+ math_expressions::round(&[array]).map_err(|e| PyValueError::new_err(format!("{e:?}")))?;
+ columnar_value_to_py_any(py, ColumnarValue::Array(result))
+}
+
/// The macro for binding function in `datafusion_physical_expr::expressions`(most of them are aggregate function)
macro_rules! bind_aggr_expr {
($FUNC_NAME:ident, $AGGR_FUNC: ident, [$($ARG: ident),*], $ARG_TY: ident, $($EXPR:ident => $idx: literal),*) => {
diff --git a/src/script/src/python/rspython/builtins.rs b/src/script/src/python/rspython/builtins.rs
index bc0c5ba155ac..42f4ae944b11 100644
--- a/src/script/src/python/rspython/builtins.rs
+++ b/src/script/src/python/rspython/builtins.rs
@@ -545,7 +545,10 @@ pub(crate) mod greptime_builtin {
/// simple math function, the backing implement is datafusion's `round` math function
#[pyfunction]
fn round(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> {
- bind_call_unary_math_function!(round, vm, val);
+ let value = try_into_columnar_value(val, vm)?;
+ let array = value.into_array(1);
+ let result = math_expressions::round(&[array]).map_err(|e| from_df_err(e, vm))?;
+ try_into_py_obj(DFColValue::Array(result), vm)
}
/// simple math function, the backing implement is datafusion's `trunc` math function
diff --git a/tests/cases/standalone/common/system/information_schema.result b/tests/cases/standalone/common/system/information_schema.result
new file mode 100644
index 000000000000..e37515362091
--- /dev/null
+++ b/tests/cases/standalone/common/system/information_schema.result
@@ -0,0 +1,48 @@
+create
+database my_db;
+
+Affected Rows: 1
+
+use
+my_db;
+
+++
+++
+
+create table foo
+(
+ ts bigint time index
+);
+
+Affected Rows: 0
+
+select table_name
+from information_schema.tables
+where table_schema = 'my_db'
+order by table_name;
+
++------------+
+| table_name |
++------------+
+| foo |
++------------+
+
+select table_catalog, table_schema, table_name, table_type
+from information_schema.tables
+where table_catalog = 'greptime'
+ and table_schema != 'public'
+order by table_schema, table_name;
+
++---------------+--------------------+------------+------------+
+| table_catalog | table_schema | table_name | table_type |
++---------------+--------------------+------------+------------+
+| greptime | information_schema | tables | VIEW |
+| greptime | my_db | foo | BASE TABLE |
++---------------+--------------------+------------+------------+
+
+use
+public;
+
+++
+++
+
diff --git a/tests/cases/standalone/common/system/information_schema.sql b/tests/cases/standalone/common/system/information_schema.sql
new file mode 100644
index 000000000000..871c01c49d93
--- /dev/null
+++ b/tests/cases/standalone/common/system/information_schema.sql
@@ -0,0 +1,24 @@
+create
+database my_db;
+
+use
+my_db;
+
+create table foo
+(
+ ts bigint time index
+);
+
+select table_name
+from information_schema.tables
+where table_schema = 'my_db'
+order by table_name;
+
+select table_catalog, table_schema, table_name, table_type
+from information_schema.tables
+where table_catalog = 'greptime'
+ and table_schema != 'public'
+order by table_schema, table_name;
+
+use
+public;
diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index d3ac5b8ce288..abb60b1f72d0 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -169,7 +169,7 @@ impl Env {
}
"frontend" => {
args.push("--metasrv-addr=0.0.0.0:3002".to_string());
- args.push("--http-addr=0.0.0.0:5000".to_string());
+ args.push("--http-addr=0.0.0.0:5003".to_string());
}
"metasrv" => {
args.push("--use-memory-store".to_string());
@@ -264,7 +264,7 @@ impl Database for GreptimeDB {
}
let mut client = self.client.lock().await;
- if query.trim().starts_with("USE ") {
+ if query.trim().to_lowercase().starts_with("use ") {
let database = query
.split_ascii_whitespace()
.nth(1)
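
The runner tweak above makes the `USE` detection case-insensitive. A standalone sketch of that parsing logic; the helper name is illustrative:

```rust
fn parse_use_database(query: &str) -> Option<&str> {
    let query = query.trim();
    // Case-insensitive match on the leading keyword, as in the runner change above.
    if !query.to_lowercase().starts_with("use ") {
        return None;
    }
    // The second whitespace-separated token is the database; strip a trailing ';'.
    query
        .split_ascii_whitespace()
        .nth(1)
        .map(|db| db.trim_end_matches(';'))
}

fn main() {
    assert_eq!(parse_use_database("use my_db;"), Some("my_db"));
    assert_eq!(parse_use_database("USE public"), Some("public"));
    assert_eq!(parse_use_database("select 1"), None);
}
```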
| feat | information schema (#1327) |
| ae81c7329d0ae217ca63bc5b447ffe2b723870ee | 2023-05-30 17:29:38 | Zou Wei | feat: support azblob storage. (#1659) | false |
diff --git a/.env.example b/.env.example
index 4abec140f648..3bb3de91d466 100644
--- a/.env.example
+++ b/.env.example
@@ -9,3 +9,9 @@ GT_OSS_BUCKET=OSS bucket
GT_OSS_ACCESS_KEY_ID=OSS access key id
GT_OSS_ACCESS_KEY=OSS access key
GT_OSS_ENDPOINT=OSS endpoint
+# Settings for azblob test
+GT_AZBLOB_CONTAINER=AZBLOB container
+GT_AZBLOB_ACCOUNT_NAME=AZBLOB account name
+GT_AZBLOB_ACCOUNT_KEY=AZBLOB account key
+GT_AZBLOB_ENDPOINT=AZBLOB endpoint
+
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index 70a4f68eb0e7..448f11ffe5c0 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -280,6 +280,7 @@ mod tests {
}
ObjectStoreConfig::S3 { .. } => unreachable!(),
ObjectStoreConfig::Oss { .. } => unreachable!(),
+ ObjectStoreConfig::Azblob { .. } => unreachable!(),
};
assert_eq!(
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 7ffa68ba69d8..664f13aead21 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -47,6 +47,7 @@ pub enum ObjectStoreConfig {
File(FileConfig),
S3(S3Config),
Oss(OssConfig),
+ Azblob(AzblobConfig),
}
/// Storage engine config
@@ -95,6 +96,21 @@ pub struct OssConfig {
pub cache_capacity: Option<ReadableSize>,
}
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(default)]
+pub struct AzblobConfig {
+ pub container: String,
+ pub root: String,
+ #[serde(skip_serializing)]
+ pub account_name: SecretString,
+ #[serde(skip_serializing)]
+ pub account_key: SecretString,
+ pub endpoint: String,
+ pub sas_token: Option<String>,
+ pub cache_path: Option<String>,
+ pub cache_capacity: Option<ReadableSize>,
+}
+
impl Default for S3Config {
fn default() -> Self {
Self {
@@ -124,6 +140,21 @@ impl Default for OssConfig {
}
}
+impl Default for AzblobConfig {
+ fn default() -> Self {
+ Self {
+ container: String::default(),
+ root: String::default(),
+ account_name: SecretString::from(String::default()),
+ account_key: SecretString::from(String::default()),
+ endpoint: String::default(),
+ cache_path: Option::default(),
+ cache_capacity: Option::default(),
+ sas_token: Option::default(),
+ }
+ }
+}
+
impl Default for ObjectStoreConfig {
fn default() -> Self {
ObjectStoreConfig::File(FileConfig {
diff --git a/src/datanode/src/store.rs b/src/datanode/src/store.rs
index 31cd141c2677..99d3faa8fd48 100644
--- a/src/datanode/src/store.rs
+++ b/src/datanode/src/store.rs
@@ -14,6 +14,7 @@
//! object storage utilities
+mod azblob;
mod fs;
mod oss;
mod s3;
@@ -36,6 +37,9 @@ pub(crate) async fn new_object_store(store_config: &ObjectStoreConfig) -> Result
ObjectStoreConfig::File(file_config) => fs::new_fs_object_store(file_config).await,
ObjectStoreConfig::S3(s3_config) => s3::new_s3_object_store(s3_config).await,
ObjectStoreConfig::Oss(oss_config) => oss::new_oss_object_store(oss_config).await,
+ ObjectStoreConfig::Azblob(azblob_config) => {
+ azblob::new_azblob_object_store(azblob_config).await
+ }
}?;
// Enable retry layer and cache layer for non-fs object storages
@@ -76,6 +80,13 @@ async fn create_object_store_with_cache(
.unwrap_or(DEFAULT_OBJECT_STORE_CACHE_SIZE);
(path, capacity)
}
+ ObjectStoreConfig::Azblob(azblob_config) => {
+ let path = azblob_config.cache_path.as_ref();
+ let capacity = azblob_config
+ .cache_capacity
+ .unwrap_or(DEFAULT_OBJECT_STORE_CACHE_SIZE);
+ (path, capacity)
+ }
_ => (None, ReadableSize(0)),
};
diff --git a/src/datanode/src/store/azblob.rs b/src/datanode/src/store/azblob.rs
new file mode 100644
index 000000000000..40497fd38c56
--- /dev/null
+++ b/src/datanode/src/store/azblob.rs
@@ -0,0 +1,47 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_telemetry::logging::info;
+use object_store::services::Azblob as AzureBuilder;
+use object_store::{util, ObjectStore};
+use secrecy::ExposeSecret;
+use snafu::prelude::*;
+
+use crate::datanode::AzblobConfig;
+use crate::error::{self, Result};
+
+pub(crate) async fn new_azblob_object_store(azblob_config: &AzblobConfig) -> Result<ObjectStore> {
+ let root = util::normalize_dir(&azblob_config.root);
+
+ info!(
+ "The azure storage container is: {}, root is: {}",
+ azblob_config.container, &root
+ );
+
+ let mut builder = AzureBuilder::default();
+ builder
+ .root(&root)
+ .container(&azblob_config.container)
+ .endpoint(&azblob_config.endpoint)
+ .account_name(azblob_config.account_name.expose_secret())
+ .account_key(azblob_config.account_key.expose_secret());
+
+ if let Some(token) = &azblob_config.sas_token {
+ builder.sas_token(token);
+ }
+
+ Ok(ObjectStore::new(builder)
+ .context(error::InitBackendSnafu)?
+ .finish())
+}
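
A usage sketch for the builder above, mirroring the calls in `new_azblob_object_store` and the integration test; the root, endpoint, and credential values are placeholders and the function name is illustrative:

```rust
use object_store::services::Azblob;
use object_store::{util, ObjectStore};

fn build_azblob_store(container: &str, account_name: &str, account_key: &str) -> ObjectStore {
    // Normalize the root the same way the datanode does before handing it to the builder.
    let root = util::normalize_dir("greptimedb/");
    let mut builder = Azblob::default();
    builder
        .root(&root)
        .container(container)
        .endpoint("https://example.blob.core.windows.net") // placeholder endpoint
        .account_name(account_name)
        .account_key(account_key);
    // Same construction as the test: build the operator, then finish it.
    ObjectStore::new(builder).unwrap().finish()
}
```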
diff --git a/src/object-store/tests/object_store_test.rs b/src/object-store/tests/object_store_test.rs
index bc3f29b51500..c70587bed7d7 100644
--- a/src/object-store/tests/object_store_test.rs
+++ b/src/object-store/tests/object_store_test.rs
@@ -23,7 +23,7 @@ use object_store::services::{Fs, S3};
use object_store::test_util::TempFolder;
use object_store::{util, ObjectStore, ObjectStoreBuilder};
use opendal::raw::Accessor;
-use opendal::services::Oss;
+use opendal::services::{Azblob, Oss};
use opendal::{EntryMode, Operator, OperatorBuilder};
async fn test_object_crud(store: &ObjectStore) -> Result<()> {
@@ -158,6 +158,33 @@ async fn test_oss_backend() -> Result<()> {
Ok(())
}
+#[tokio::test]
+async fn test_azblob_backend() -> Result<()> {
+ logging::init_default_ut_logging();
+ if let Ok(container) = env::var("GT_AZBLOB_CONTAINER") {
+ if !container.is_empty() {
+ logging::info!("Running azblob test.");
+
+ let root = uuid::Uuid::new_v4().to_string();
+
+ let mut builder = Azblob::default();
+ builder
+ .root(&root)
+ .account_name(&env::var("GT_AZBLOB_ACCOUNT_NAME")?)
+ .account_key(&env::var("GT_AZBLOB_ACCOUNT_KEY")?)
+ .container(&container);
+
+ let store = ObjectStore::new(builder).unwrap().finish();
+
+ let mut guard = TempFolder::new(&store, "/");
+ test_object_crud(&store).await?;
+ test_object_list(&store).await?;
+ guard.remove_all().await?;
+ }
+ }
+ Ok(())
+}
+
async fn assert_lru_cache<C: Accessor + Clone>(
cache_layer: &LruCacheLayer<C>,
file_names: &[&str],
diff --git a/tests-integration/README.md b/tests-integration/README.md
index 6e8a637ad628..27f66da86780 100644
--- a/tests-integration/README.md
+++ b/tests-integration/README.md
@@ -32,3 +32,9 @@ Test oss storage:
```
cargo test oss
```
+
+Test azblob storage:
+
+```
+cargo test azblob
+```
\ No newline at end of file
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index 043a8214bac8..2f7eb74c6638 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -26,8 +26,8 @@ use common_runtime::Builder as RuntimeBuilder;
use common_test_util::ports;
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use datanode::datanode::{
- DatanodeOptions, FileConfig, ObjectStoreConfig, OssConfig, ProcedureConfig, S3Config,
- StorageConfig, WalConfig,
+ AzblobConfig, DatanodeOptions, FileConfig, ObjectStoreConfig, OssConfig, ProcedureConfig,
+ S3Config, StorageConfig, WalConfig,
};
use datanode::error::{CreateTableSnafu, Result};
use datanode::instance::Instance;
@@ -35,7 +35,7 @@ use datanode::sql::SqlHandler;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema};
use frontend::instance::Instance as FeInstance;
-use object_store::services::{Oss, S3};
+use object_store::services::{Azblob, Oss, S3};
use object_store::test_util::TempFolder;
use object_store::ObjectStore;
use secrecy::ExposeSecret;
@@ -57,6 +57,7 @@ pub enum StorageType {
S3WithCache,
File,
Oss,
+ Azblob,
}
impl StorageType {
@@ -79,6 +80,13 @@ impl StorageType {
false
}
}
+ StorageType::Azblob => {
+ if let Ok(b) = env::var("GT_AZBLOB_CONTAINER") {
+ !b.is_empty()
+ } else {
+ false
+ }
+ }
}
}
}
@@ -101,6 +109,34 @@ pub fn get_test_store_config(
let _ = dotenv::dotenv();
match store_type {
+ StorageType::Azblob => {
+ let azblob_config = AzblobConfig {
+ root: uuid::Uuid::new_v4().to_string(),
+ container: env::var("GT_AZBLOB_CONTAINER").unwrap(),
+ account_name: env::var("GT_AZBLOB_ACCOUNT_NAME").unwrap().into(),
+ account_key: env::var("GT_AZBLOB_ACCOUNT_KEY").unwrap().into(),
+ endpoint: env::var("GT_AZBLOB_ENDPOINT").unwrap(),
+ ..Default::default()
+ };
+
+ let mut builder = Azblob::default();
+ builder
+ .root(&azblob_config.root)
+ .endpoint(&azblob_config.endpoint)
+ .account_name(azblob_config.account_name.expose_secret())
+ .account_key(azblob_config.account_key.expose_secret())
+ .container(&azblob_config.container);
+
+ if let Ok(sas_token) = env::var("GT_AZBLOB_SAS_TOKEN") {
+ builder.sas_token(&sas_token);
+ }
+
+ let config = ObjectStoreConfig::Azblob(azblob_config);
+
+ let store = ObjectStore::new(builder).unwrap().finish();
+
+ (config, TempDirGuard::Azblob(TempFolder::new(&store, "/")))
+ }
StorageType::Oss => {
let oss_config = OssConfig {
root: uuid::Uuid::new_v4().to_string(),
@@ -169,6 +205,7 @@ pub enum TempDirGuard {
File(TempDir),
S3(TempFolder),
Oss(TempFolder),
+ Azblob(TempFolder),
}
pub struct TestGuard {
@@ -182,7 +219,9 @@ pub struct StorageGuard(pub TempDirGuard);
impl TestGuard {
pub async fn remove_all(&mut self) {
- if let TempDirGuard::S3(guard) | TempDirGuard::Oss(guard) = &mut self.storage_guard.0 {
+ if let TempDirGuard::S3(guard) | TempDirGuard::Oss(guard) | TempDirGuard::Azblob(guard) =
+ &mut self.storage_guard.0
+ {
guard.remove_all().await.unwrap()
}
}
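
The env-var gating above is what decides whether the azblob suite runs at all. A tiny standalone sketch of that check, matching the `StorageType::Azblob` arm; the function name is illustrative:

```rust
use std::env;

// The backend test only counts as runnable when its container variable is set and non-empty.
fn azblob_tests_enabled() -> bool {
    env::var("GT_AZBLOB_CONTAINER")
        .map(|container| !container.is_empty())
        .unwrap_or(false)
}

fn main() {
    if !azblob_tests_enabled() {
        println!("skipping azblob tests: GT_AZBLOB_CONTAINER is not set");
    }
}
```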
diff --git a/tests-integration/tests/main.rs b/tests-integration/tests/main.rs
index 2b4e17bbf3f7..e3b09a30a74b 100644
--- a/tests-integration/tests/main.rs
+++ b/tests-integration/tests/main.rs
@@ -17,5 +17,5 @@ mod grpc;
#[macro_use]
mod http;
-grpc_tests!(File, S3, S3WithCache, Oss);
-http_tests!(File, S3, S3WithCache, Oss);
+grpc_tests!(File, S3, S3WithCache, Oss, Azblob);
+http_tests!(File, S3, S3WithCache, Oss, Azblob);
| feat | support azblob storage. (#1659) |
| a7dc86ffe5c7be47e65c399b124c33222431aade | 2023-01-29 17:39:38 | Yun Chen | feat: oss storage support (#911) | false |
diff --git a/.env.example b/.env.example
index 9117d0d4c68a..da1bbcc2136d 100644
--- a/.env.example
+++ b/.env.example
@@ -2,3 +2,9 @@
GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key
+
+# Settings for oss test
+GT_OSS_BUCKET=OSS bucket
+GT_OSS_ACCESS_KEY_ID=OSS access key id
+GT_OSS_ACCESS_KEY=OSS access key
+GT_OSS_ENDPOINT=OSS endpoint
diff --git a/Cargo.lock b/Cargo.lock
index 18a8164c01c5..f2fcbd6f9a40 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -681,9 +681,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
[[package]]
name = "base64"
-version = "0.20.0"
+version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5"
+checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a"
[[package]]
name = "benchmarks"
@@ -4401,20 +4401,21 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
[[package]]
name = "opendal"
-version = "0.24.2"
+version = "0.25.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "97541724cf371973b28f5a873404f2a2a4f7bb1efe7ca36a27836c13958781c2"
+checksum = "73829d3a057542556dc2c2d2b70700a44dda913cdb5483094c20ef9673ca283c"
dependencies = [
"anyhow",
"async-compat",
"async-trait",
"backon",
- "base64 0.20.0",
+ "base64 0.21.0",
"bincode 2.0.0-rc.2",
"bytes",
"flagset",
"futures",
"http",
+ "hyper",
"log",
"md-5",
"metrics",
@@ -5674,13 +5675,13 @@ dependencies = [
[[package]]
name = "reqsign"
-version = "0.7.4"
+version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1c97ac0f771c78ddf4bcb73c8454c76565a7249780e7296767f7e89661b0e045"
+checksum = "3f446438814fde3785305a59a85a6d1b361ce2c9d29e58dd87c9103a242c40b6"
dependencies = [
"anyhow",
"backon",
- "base64 0.20.0",
+ "base64 0.21.0",
"bytes",
"dirs 4.0.0",
"form_urlencoded",
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index b431d30913f3..4c1f066f1d2f 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -14,7 +14,7 @@
use clap::Parser;
use common_telemetry::logging;
-use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig};
+use datanode::datanode::{Datanode, DatanodeOptions, FileConfig, ObjectStoreConfig};
use meta_client::MetaClientOpts;
use servers::Mode;
use snafu::ResultExt;
@@ -128,7 +128,7 @@ impl TryFrom<StartCommand> for DatanodeOptions {
}
if let Some(data_dir) = cmd.data_dir {
- opts.storage = ObjectStoreConfig::File { data_dir };
+ opts.storage = ObjectStoreConfig::File(FileConfig { data_dir });
}
if let Some(wal_dir) = cmd.wal_dir {
@@ -175,10 +175,11 @@ mod tests {
assert!(!tcp_nodelay);
match options.storage {
- ObjectStoreConfig::File { data_dir } => {
+ ObjectStoreConfig::File(FileConfig { data_dir }) => {
assert_eq!("/tmp/greptimedb/data/".to_string(), data_dir)
}
ObjectStoreConfig::S3 { .. } => unreachable!(),
+ ObjectStoreConfig::Oss { .. } => unreachable!(),
};
}
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index e2d385fde8a6..22e2231fd40e 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -28,24 +28,43 @@ use crate::server::Services;
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum ObjectStoreConfig {
- File {
- data_dir: String,
- },
- S3 {
- bucket: String,
- root: String,
- access_key_id: String,
- secret_access_key: String,
- endpoint: Option<String>,
- region: Option<String>,
- },
+ File(FileConfig),
+ S3(S3Config),
+ Oss(OssConfig),
+}
+
+#[derive(Debug, Clone, Serialize, Default, Deserialize)]
+#[serde(default)]
+pub struct FileConfig {
+ pub data_dir: String,
+}
+
+#[derive(Debug, Clone, Serialize, Default, Deserialize)]
+#[serde(default)]
+pub struct S3Config {
+ pub bucket: String,
+ pub root: String,
+ pub access_key_id: String,
+ pub secret_access_key: String,
+ pub endpoint: Option<String>,
+ pub region: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Default, Deserialize)]
+#[serde(default)]
+pub struct OssConfig {
+ pub bucket: String,
+ pub root: String,
+ pub access_key_id: String,
+ pub access_key_secret: String,
+ pub endpoint: String,
}
impl Default for ObjectStoreConfig {
fn default() -> Self {
- ObjectStoreConfig::File {
+ ObjectStoreConfig::File(FileConfig {
data_dir: "/tmp/greptimedb/data/".to_string(),
- }
+ })
}
}
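
The change above turns the struct-like enum variants into tuple variants that wrap dedicated config structs, so each backend's settings can derive their own defaults and serde attributes. A self-contained toy version of that shape; the structs here are simplified stand-ins for the real `FileConfig`/`OssConfig`:

```rust
#[derive(Debug, Clone, Default)]
struct FileConfig {
    data_dir: String,
}

#[derive(Debug, Clone, Default)]
struct OssConfig {
    bucket: String,
    root: String,
}

#[derive(Debug, Clone)]
enum ObjectStoreConfig {
    File(FileConfig),
    Oss(OssConfig),
}

fn describe(config: &ObjectStoreConfig) -> String {
    // Each arm gets a typed config struct instead of destructuring loose fields.
    match config {
        ObjectStoreConfig::File(c) => format!("file backend at {}", c.data_dir),
        ObjectStoreConfig::Oss(c) => format!("oss bucket {} under {}", c.bucket, c.root),
    }
}

fn main() {
    let config = ObjectStoreConfig::File(FileConfig {
        data_dir: "/tmp/greptimedb/data/".to_string(),
    });
    println!("{}", describe(&config));
}
```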
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 2c89d740241c..a75ceab75df6 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -30,6 +30,7 @@ use mito::config::EngineConfig as TableEngineConfig;
use mito::engine::MitoEngine;
use object_store::layers::{LoggingLayer, MetricsLayer, RetryLayer, TracingLayer};
use object_store::services::fs::Builder as FsBuilder;
+use object_store::services::oss::Builder as OSSBuilder;
use object_store::services::s3::Builder as S3Builder;
use object_store::{util, ObjectStore};
use query::query_engine::{QueryEngineFactory, QueryEngineRef};
@@ -201,8 +202,9 @@ impl Instance {
pub(crate) async fn new_object_store(store_config: &ObjectStoreConfig) -> Result<ObjectStore> {
let object_store = match store_config {
- ObjectStoreConfig::File { data_dir } => new_fs_object_store(data_dir).await,
+ ObjectStoreConfig::File { .. } => new_fs_object_store(store_config).await,
ObjectStoreConfig::S3 { .. } => new_s3_object_store(store_config).await,
+ ObjectStoreConfig::Oss { .. } => new_oss_object_store(store_config).await,
};
object_store.map(|object_store| {
@@ -214,41 +216,57 @@ pub(crate) async fn new_object_store(store_config: &ObjectStoreConfig) -> Result
})
}
+pub(crate) async fn new_oss_object_store(store_config: &ObjectStoreConfig) -> Result<ObjectStore> {
+ let oss_config = match store_config {
+ ObjectStoreConfig::Oss(config) => config,
+ _ => unreachable!(),
+ };
+
+ let root = util::normalize_dir(&oss_config.root);
+ info!(
+ "The oss storage bucket is: {}, root is: {}",
+ oss_config.bucket, &root
+ );
+
+ let mut builder = OSSBuilder::default();
+ let builder = builder
+ .root(&root)
+ .bucket(&oss_config.bucket)
+ .endpoint(&oss_config.endpoint)
+ .access_key_id(&oss_config.access_key_id)
+ .access_key_secret(&oss_config.access_key_secret);
+
+ let accessor = builder.build().with_context(|_| error::InitBackendSnafu {
+ config: store_config.clone(),
+ })?;
+
+ Ok(ObjectStore::new(accessor))
+}
+
pub(crate) async fn new_s3_object_store(store_config: &ObjectStoreConfig) -> Result<ObjectStore> {
- let (root, secret_key, key_id, bucket, endpoint, region) = match store_config {
- ObjectStoreConfig::S3 {
- bucket,
- root,
- access_key_id,
- secret_access_key,
- endpoint,
- region,
- } => (
- root,
- secret_access_key,
- access_key_id,
- bucket,
- endpoint,
- region,
- ),
+ let s3_config = match store_config {
+ ObjectStoreConfig::S3(config) => config,
_ => unreachable!(),
};
- let root = util::normalize_dir(root);
- info!("The s3 storage bucket is: {}, root is: {}", bucket, &root);
+ let root = util::normalize_dir(&s3_config.root);
+ info!(
+ "The s3 storage bucket is: {}, root is: {}",
+ s3_config.bucket, &root
+ );
let mut builder = S3Builder::default();
let mut builder = builder
.root(&root)
- .bucket(bucket)
- .access_key_id(key_id)
- .secret_access_key(secret_key);
+ .bucket(&s3_config.bucket)
+ .access_key_id(&s3_config.access_key_id)
+ .secret_access_key(&s3_config.secret_access_key);
- if let Some(endpoint) = endpoint {
- builder = builder.endpoint(endpoint);
+ if s3_config.endpoint.is_some() {
+ builder = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
}
- if let Some(region) = region {
- builder = builder.region(region);
+ if s3_config.region.is_some() {
+ builder = builder.region(s3_config.region.as_ref().unwrap());
}
let accessor = builder.build().with_context(|_| error::InitBackendSnafu {
@@ -258,8 +276,12 @@ pub(crate) async fn new_s3_object_store(store_config: &ObjectStoreConfig) -> Res
Ok(ObjectStore::new(accessor))
}
-pub(crate) async fn new_fs_object_store(data_dir: &str) -> Result<ObjectStore> {
- let data_dir = util::normalize_dir(data_dir);
+pub(crate) async fn new_fs_object_store(store_config: &ObjectStoreConfig) -> Result<ObjectStore> {
+ let file_config = match store_config {
+ ObjectStoreConfig::File(config) => config,
+ _ => unreachable!(),
+ };
+ let data_dir = util::normalize_dir(&file_config.data_dir);
fs::create_dir_all(path::Path::new(&data_dir))
.context(error::CreateDirSnafu { dir: &data_dir })?;
info!("The file storage directory is: {}", &data_dir);
@@ -271,7 +293,7 @@ pub(crate) async fn new_fs_object_store(data_dir: &str) -> Result<ObjectStore> {
.atomic_write_dir(&atomic_write_dir)
.build()
.context(error::InitBackendSnafu {
- config: ObjectStoreConfig::File { data_dir },
+ config: store_config.clone(),
})?;
Ok(ObjectStore::new(accessor))
diff --git a/src/datanode/src/tests/test_util.rs b/src/datanode/src/tests/test_util.rs
index d21603d0e0c3..5163fd59bc57 100644
--- a/src/datanode/src/tests/test_util.rs
+++ b/src/datanode/src/tests/test_util.rs
@@ -29,7 +29,7 @@ use table::engine::{EngineContext, TableEngineRef};
use table::requests::CreateTableRequest;
use tempdir::TempDir;
-use crate::datanode::{DatanodeOptions, ObjectStoreConfig, WalConfig};
+use crate::datanode::{DatanodeOptions, FileConfig, ObjectStoreConfig, WalConfig};
use crate::error::{CreateTableSnafu, Result};
use crate::instance::Instance;
use crate::sql::SqlHandler;
@@ -67,9 +67,9 @@ fn create_tmp_dir_and_datanode_opts(name: &str) -> (DatanodeOptions, TestGuard)
dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
..Default::default()
},
- storage: ObjectStoreConfig::File {
+ storage: ObjectStoreConfig::File(FileConfig {
data_dir: data_tmp_dir.path().to_str().unwrap().to_string(),
- },
+ }),
mode: Mode::Standalone,
..Default::default()
};
diff --git a/src/frontend/src/tests.rs b/src/frontend/src/tests.rs
index 9fd55e24fb9e..3249000605aa 100644
--- a/src/frontend/src/tests.rs
+++ b/src/frontend/src/tests.rs
@@ -20,7 +20,7 @@ use catalog::remote::MetaKvBackend;
use client::Client;
use common_grpc::channel_manager::ChannelManager;
use common_runtime::Builder as RuntimeBuilder;
-use datanode::datanode::{DatanodeOptions, ObjectStoreConfig, WalConfig};
+use datanode::datanode::{DatanodeOptions, FileConfig, ObjectStoreConfig, WalConfig};
use datanode::instance::Instance as DatanodeInstance;
use meta_client::client::MetaClientBuilder;
use meta_client::rpc::Peer;
@@ -81,9 +81,9 @@ fn create_tmp_dir_and_datanode_opts(name: &str) -> (DatanodeOptions, TestGuard)
dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
..Default::default()
},
- storage: ObjectStoreConfig::File {
+ storage: ObjectStoreConfig::File(FileConfig {
data_dir: data_tmp_dir.path().to_str().unwrap().to_string(),
- },
+ }),
mode: Mode::Standalone,
..Default::default()
};
@@ -167,9 +167,9 @@ async fn create_distributed_datanode(
dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
..Default::default()
},
- storage: ObjectStoreConfig::File {
+ storage: ObjectStoreConfig::File(FileConfig {
data_dir: data_tmp_dir.path().to_str().unwrap().to_string(),
- },
+ }),
mode: Mode::Distributed,
..Default::default()
};
diff --git a/src/object-store/Cargo.toml b/src/object-store/Cargo.toml
index 0c7ce0026311..bf03f7e5017d 100644
--- a/src/object-store/Cargo.toml
+++ b/src/object-store/Cargo.toml
@@ -6,7 +6,10 @@ license.workspace = true
[dependencies]
futures = { version = "0.3" }
-opendal = { version = "0.24", features = ["layers-tracing", "layers-metrics"] }
+opendal = { version = "0.25.1", features = [
+ "layers-tracing",
+ "layers-metrics",
+] }
tokio.workspace = true
[dev-dependencies]
diff --git a/src/object-store/tests/object_store_test.rs b/src/object-store/tests/object_store_test.rs
index c3173fbf99c5..c52db9e10707 100644
--- a/src/object-store/tests/object_store_test.rs
+++ b/src/object-store/tests/object_store_test.rs
@@ -19,6 +19,7 @@ use common_telemetry::logging;
use object_store::backend::{fs, s3};
use object_store::test_util::TempFolder;
use object_store::{util, Object, ObjectLister, ObjectMode, ObjectStore};
+use opendal::services::oss;
use tempdir::TempDir;
async fn test_object_crud(store: &ObjectStore) -> Result<()> {
@@ -131,3 +132,31 @@ async fn test_s3_backend() -> Result<()> {
Ok(())
}
+
+#[tokio::test]
+async fn test_oss_backend() -> Result<()> {
+ logging::init_default_ut_logging();
+ if let Ok(bucket) = env::var("GT_OSS_BUCKET") {
+ if !bucket.is_empty() {
+ logging::info!("Running oss test.");
+
+ let root = uuid::Uuid::new_v4().to_string();
+
+ let accessor = oss::Builder::default()
+ .root(&root)
+ .access_key_id(&env::var("GT_OSS_ACCESS_KEY_ID")?)
+ .access_key_secret(&env::var("GT_OSS_ACCESS_KEY")?)
+ .bucket(&bucket)
+ .build()?;
+
+ let store = ObjectStore::new(accessor);
+
+ let mut guard = TempFolder::new(&store, "/");
+ test_object_crud(&store).await?;
+ test_object_list(&store).await?;
+ guard.remove_all().await?;
+ }
+ }
+
+ Ok(())
+}
diff --git a/tests-integration/README.md b/tests-integration/README.md
index ec0905504a01..8db35ae7b4a4 100644
--- a/tests-integration/README.md
+++ b/tests-integration/README.md
@@ -11,6 +11,7 @@ GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key
```
+
## Run
Execute the following command in the project root folder:
@@ -24,3 +25,9 @@ Test s3 storage:
```
cargo test s3
```
+
+Test oss storage:
+
+```
+cargo test oss
+```
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index a6353628a702..978495cec796 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -23,7 +23,9 @@ use axum::Router;
use catalog::CatalogManagerRef;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use common_runtime::Builder as RuntimeBuilder;
-use datanode::datanode::{DatanodeOptions, ObjectStoreConfig, WalConfig};
+use datanode::datanode::{
+ DatanodeOptions, FileConfig, ObjectStoreConfig, OssConfig, S3Config, WalConfig,
+};
use datanode::error::{CreateTableSnafu, Result};
use datanode::instance::{Instance, InstanceRef};
use datanode::sql::SqlHandler;
@@ -31,6 +33,7 @@ use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, SchemaBuilder};
use frontend::instance::Instance as FeInstance;
use object_store::backend::s3;
+use object_store::services::oss;
use object_store::test_util::TempFolder;
use object_store::ObjectStore;
use once_cell::sync::OnceCell;
@@ -57,6 +60,7 @@ fn get_port() -> usize {
pub enum StorageType {
S3,
File,
+ Oss,
}
impl StorageType {
@@ -72,6 +76,13 @@ impl StorageType {
false
}
}
+ StorageType::Oss => {
+ if let Ok(b) = env::var("GT_OSS_BUCKET") {
+ !b.is_empty()
+ } else {
+ false
+ }
+ }
}
}
}
@@ -83,29 +94,53 @@ fn get_test_store_config(
let _ = dotenv::dotenv();
match store_type {
- StorageType::S3 => {
- let root = uuid::Uuid::new_v4().to_string();
- let key_id = env::var("GT_S3_ACCESS_KEY_ID").unwrap();
- let secret_key = env::var("GT_S3_ACCESS_KEY").unwrap();
- let bucket = env::var("GT_S3_BUCKET").unwrap();
+ StorageType::Oss => {
+ let oss_config = OssConfig {
+ root: uuid::Uuid::new_v4().to_string(),
+ access_key_id: env::var("GT_OSS_ACCESS_KEY_ID").unwrap(),
+ access_key_secret: env::var("GT_OSS_ACCESS_KEY").unwrap(),
+ bucket: env::var("GT_OSS_BUCKET").unwrap(),
+ endpoint: env::var("GT_OSS_ENDPOINT").unwrap(),
+ };
- let accessor = s3::Builder::default()
- .root(&root)
- .access_key_id(&key_id)
- .secret_access_key(&secret_key)
- .bucket(&bucket)
+ let accessor = oss::Builder::default()
+ .root(&oss_config.root)
+ .endpoint(&oss_config.endpoint)
+ .access_key_id(&oss_config.access_key_id)
+ .access_key_secret(&oss_config.access_key_secret)
+ .bucket(&oss_config.bucket)
.build()
.unwrap();
- let config = ObjectStoreConfig::S3 {
- root,
- bucket,
- access_key_id: key_id,
- secret_access_key: secret_key,
+ let config = ObjectStoreConfig::Oss(oss_config);
+
+ let store = ObjectStore::new(accessor);
+
+ (
+ config,
+ Some(TempDirGuard::Oss(TempFolder::new(&store, "/"))),
+ )
+ }
+ StorageType::S3 => {
+ let s3_config = S3Config {
+ root: uuid::Uuid::new_v4().to_string(),
+ access_key_id: env::var("GT_S3_ACCESS_KEY_ID").unwrap(),
+ secret_access_key: env::var("GT_S3_ACCESS_KEY").unwrap(),
+ bucket: env::var("GT_S3_BUCKET").unwrap(),
endpoint: None,
region: None,
};
+ let accessor = s3::Builder::default()
+ .root(&s3_config.root)
+ .access_key_id(&s3_config.access_key_id)
+ .secret_access_key(&s3_config.secret_access_key)
+ .bucket(&s3_config.bucket)
+ .build()
+ .unwrap();
+
+ let config = ObjectStoreConfig::S3(s3_config);
+
let store = ObjectStore::new(accessor);
(config, Some(TempDirGuard::S3(TempFolder::new(&store, "/"))))
@@ -114,9 +149,9 @@ fn get_test_store_config(
let data_tmp_dir = TempDir::new(&format!("gt_data_{name}")).unwrap();
(
- ObjectStoreConfig::File {
+ ObjectStoreConfig::File(FileConfig {
data_dir: data_tmp_dir.path().to_str().unwrap().to_string(),
- },
+ }),
Some(TempDirGuard::File(data_tmp_dir)),
)
}
@@ -126,6 +161,7 @@ fn get_test_store_config(
enum TempDirGuard {
File(TempDir),
S3(TempFolder),
+ Oss(TempFolder),
}
/// Create a tmp dir(will be deleted once it goes out of scope.) and a default `DatanodeOptions`,
@@ -140,6 +176,9 @@ impl TestGuard {
if let Some(TempDirGuard::S3(mut guard)) = self.data_tmp_dir.take() {
guard.remove_all().await.unwrap();
}
+ if let Some(TempDirGuard::Oss(mut guard)) = self.data_tmp_dir.take() {
+ guard.remove_all().await.unwrap();
+ }
}
}
diff --git a/tests-integration/tests/main.rs b/tests-integration/tests/main.rs
index 4f76d51aab02..94d9efbc83a1 100644
--- a/tests-integration/tests/main.rs
+++ b/tests-integration/tests/main.rs
@@ -17,5 +17,5 @@ mod grpc;
#[macro_use]
mod http;
-grpc_tests!(File, S3);
-http_tests!(File, S3);
+grpc_tests!(File, S3, Oss);
+http_tests!(File, S3, Oss);
| feat | oss storage support (#911) |
| 486bb2ee8e1d0f810eacd361a633ed1a0ba30a42 | 2023-05-10 13:23:06 | WU Jingdi | feat: Compress manifest and checkpoint (#1497) | false |
diff --git a/Cargo.lock b/Cargo.lock
index 6fcfb0d990e6..a8f23d71c8d6 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1621,6 +1621,7 @@ dependencies = [
"derive_builder 0.12.0",
"futures",
"object-store",
+ "paste",
"regex",
"snafu",
"tokio",
diff --git a/src/common/datasource/Cargo.toml b/src/common/datasource/Cargo.toml
index d57c83faa9bf..988bd6fb0ddf 100644
--- a/src/common/datasource/Cargo.toml
+++ b/src/common/datasource/Cargo.toml
@@ -29,6 +29,7 @@ snafu.workspace = true
tokio.workspace = true
tokio-util.workspace = true
url = "2.3"
+paste = "1.0"
[dev-dependencies]
common-test-util = { path = "../test-util" }
diff --git a/src/common/datasource/src/compression.rs b/src/common/datasource/src/compression.rs
index fcf21f5db7d5..bc840cd6a816 100644
--- a/src/common/datasource/src/compression.rs
+++ b/src/common/datasource/src/compression.rs
@@ -17,9 +17,10 @@ use std::io;
use std::str::FromStr;
use async_compression::tokio::bufread::{BzDecoder, GzipDecoder, XzDecoder, ZstdDecoder};
+use async_compression::tokio::write;
use bytes::Bytes;
use futures::Stream;
-use tokio::io::{AsyncRead, BufReader};
+use tokio::io::{AsyncRead, AsyncWriteExt, BufReader};
use tokio_util::io::{ReaderStream, StreamReader};
use crate::error::{self, Error, Result};
@@ -73,37 +74,107 @@ impl CompressionType {
!matches!(self, &Self::Uncompressed)
}
- pub fn convert_async_read<T: AsyncRead + Unpin + Send + 'static>(
- &self,
- s: T,
- ) -> Box<dyn AsyncRead + Unpin + Send> {
+ pub const fn file_extension(&self) -> &'static str {
match self {
- CompressionType::Gzip => Box::new(GzipDecoder::new(BufReader::new(s))),
- CompressionType::Bzip2 => Box::new(BzDecoder::new(BufReader::new(s))),
- CompressionType::Xz => Box::new(XzDecoder::new(BufReader::new(s))),
- CompressionType::Zstd => Box::new(ZstdDecoder::new(BufReader::new(s))),
- CompressionType::Uncompressed => Box::new(s),
+ Self::Gzip => "gz",
+ Self::Bzip2 => "bz2",
+ Self::Xz => "xz",
+ Self::Zstd => "zst",
+ Self::Uncompressed => "",
}
}
+}
- pub fn convert_stream<T: Stream<Item = io::Result<Bytes>> + Unpin + Send + 'static>(
- &self,
- s: T,
- ) -> Box<dyn Stream<Item = io::Result<Bytes>> + Send + Unpin> {
- match self {
- CompressionType::Gzip => {
- Box::new(ReaderStream::new(GzipDecoder::new(StreamReader::new(s))))
- }
- CompressionType::Bzip2 => {
- Box::new(ReaderStream::new(BzDecoder::new(StreamReader::new(s))))
- }
- CompressionType::Xz => {
- Box::new(ReaderStream::new(XzDecoder::new(StreamReader::new(s))))
+macro_rules! impl_compression_type {
+ ($(($enum_item:ident, $prefix:ident)),*) => {
+ paste::item! {
+ impl CompressionType {
+ pub async fn encode(&self, content: impl AsRef<[u8]>) -> io::Result<Vec<u8>> {
+ match self {
+ $(
+ CompressionType::$enum_item => {
+ let mut buffer = Vec::with_capacity(content.as_ref().len());
+ let mut encoder = write::[<$prefix Encoder>]::new(&mut buffer);
+ encoder.write_all(content.as_ref()).await?;
+ encoder.shutdown().await?;
+ Ok(buffer)
+ }
+ )*
+ CompressionType::Uncompressed => Ok(content.as_ref().to_vec()),
+ }
+ }
+
+ pub async fn decode(&self, content: impl AsRef<[u8]>) -> io::Result<Vec<u8>> {
+ match self {
+ $(
+ CompressionType::$enum_item => {
+ let mut buffer = Vec::with_capacity(content.as_ref().len() * 2);
+ let mut encoder = write::[<$prefix Decoder>]::new(&mut buffer);
+ encoder.write_all(content.as_ref()).await?;
+ encoder.shutdown().await?;
+ Ok(buffer)
+ }
+ )*
+ CompressionType::Uncompressed => Ok(content.as_ref().to_vec()),
+ }
+ }
+
+ pub fn convert_async_read<T: AsyncRead + Unpin + Send + 'static>(
+ &self,
+ s: T,
+ ) -> Box<dyn AsyncRead + Unpin + Send> {
+ match self {
+ $(CompressionType::$enum_item => Box::new([<$prefix Decoder>]::new(BufReader::new(s))),)*
+ CompressionType::Uncompressed => Box::new(s),
+ }
+ }
+
+ pub fn convert_stream<T: Stream<Item = io::Result<Bytes>> + Unpin + Send + 'static>(
+ &self,
+ s: T,
+ ) -> Box<dyn Stream<Item = io::Result<Bytes>> + Send + Unpin> {
+ match self {
+ $(CompressionType::$enum_item => Box::new(ReaderStream::new([<$prefix Decoder>]::new(StreamReader::new(s)))),)*
+ CompressionType::Uncompressed => Box::new(s),
+ }
+ }
}
- CompressionType::Zstd => {
- Box::new(ReaderStream::new(ZstdDecoder::new(StreamReader::new(s))))
+
+ #[cfg(test)]
+ mod tests {
+ use super::CompressionType;
+
+ $(
+ #[tokio::test]
+ async fn [<test_ $enum_item:lower _compression>]() {
+ let string = "foo_bar".as_bytes().to_vec();
+ let compress = CompressionType::$enum_item
+ .encode(&string)
+ .await
+ .unwrap();
+ let decompress = CompressionType::$enum_item
+ .decode(&compress)
+ .await
+ .unwrap();
+ assert_eq!(decompress, string);
+ })*
+
+ #[tokio::test]
+ async fn test_uncompression() {
+ let string = "foo_bar".as_bytes().to_vec();
+ let compress = CompressionType::Uncompressed
+ .encode(&string)
+ .await
+ .unwrap();
+ let decompress = CompressionType::Uncompressed
+ .decode(&compress)
+ .await
+ .unwrap();
+ assert_eq!(decompress, string);
+ }
}
- CompressionType::Uncompressed => Box::new(s),
}
- }
+ };
}
+
+impl_compression_type!((Gzip, Gzip), (Bzip2, Bz), (Xz, Xz), (Zstd, Zstd));
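
A round-trip sketch of the `encode`/`decode` helpers the macro above generates, mirroring the generated tests; it assumes a tokio runtime with the `macros` feature and the crate path shown in the later `storage` imports:

```rust
use common_datasource::compression::CompressionType;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let payload = b"foo_bar".to_vec();
    // Gzip-compress the bytes, then decompress and verify the round trip.
    let compressed = CompressionType::Gzip.encode(&payload).await?;
    let decompressed = CompressionType::Gzip.decode(&compressed).await?;
    assert_eq!(decompressed, payload);
    Ok(())
}
```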
diff --git a/src/storage/src/error.rs b/src/storage/src/error.rs
index 9bef856610dd..692b6bf3466c 100644
--- a/src/storage/src/error.rs
+++ b/src/storage/src/error.rs
@@ -16,6 +16,7 @@ use std::any::Any;
use std::io::Error as IoError;
use std::str::Utf8Error;
+use common_datasource::compression::CompressionType;
use common_error::prelude::*;
use common_runtime::error::Error as RuntimeError;
use datatypes::arrow::error::ArrowError;
@@ -83,6 +84,30 @@ pub enum Error {
source: object_store::Error,
},
+ #[snafu(display(
+ "Fail to compress object by {}, path: {}, source: {}",
+ compress_type,
+ path,
+ source
+ ))]
+ CompressObject {
+ compress_type: CompressionType,
+ path: String,
+ source: std::io::Error,
+ },
+
+ #[snafu(display(
+ "Fail to decompress object by {}, path: {}, source: {}",
+ compress_type,
+ path,
+ source
+ ))]
+ DecompressObject {
+ compress_type: CompressionType,
+ path: String,
+ source: std::io::Error,
+ },
+
#[snafu(display("Fail to list objects in path: {}, source: {}", path, source))]
ListObjects {
path: String,
@@ -517,6 +542,8 @@ impl ErrorExt for Error {
| DecodeArrow { .. }
| EncodeArrow { .. }
| ManifestCheckpoint { .. }
+ | CompressObject { .. }
+ | DecompressObject { .. }
| ParseSchema { .. } => StatusCode::Unexpected,
WriteParquet { .. }
diff --git a/src/storage/src/manifest/storage.rs b/src/storage/src/manifest/storage.rs
index bf350356bb30..e5ce1487ce0e 100644
--- a/src/storage/src/manifest/storage.rs
+++ b/src/storage/src/manifest/storage.rs
@@ -14,8 +14,10 @@
use std::collections::HashMap;
use std::iter::Iterator;
+use std::str::FromStr;
use async_trait::async_trait;
+use common_datasource::compression::CompressionType;
use common_telemetry::logging;
use futures::TryStreamExt;
use lazy_static::lazy_static;
@@ -26,16 +28,21 @@ use snafu::{ensure, ResultExt};
use store_api::manifest::{LogIterator, ManifestLogStorage, ManifestVersion};
use crate::error::{
- DecodeJsonSnafu, DeleteObjectSnafu, EncodeJsonSnafu, Error, InvalidScanIndexSnafu,
- ListObjectsSnafu, ReadObjectSnafu, Result, Utf8Snafu, WriteObjectSnafu,
+ CompressObjectSnafu, DecodeJsonSnafu, DecompressObjectSnafu, DeleteObjectSnafu,
+ EncodeJsonSnafu, Error, InvalidScanIndexSnafu, ListObjectsSnafu, ReadObjectSnafu, Result,
+ Utf8Snafu, WriteObjectSnafu,
};
lazy_static! {
- static ref DELTA_RE: Regex = Regex::new("^\\d+\\.json$").unwrap();
+ static ref DELTA_RE: Regex = Regex::new("^\\d+\\.json").unwrap();
static ref CHECKPOINT_RE: Regex = Regex::new("^\\d+\\.checkpoint").unwrap();
}
const LAST_CHECKPOINT_FILE: &str = "_last_checkpoint";
+const DEFAULT_MANIFEST_COMPRESSION_TYPE: CompressionType = CompressionType::Uncompressed;
+/// Due to backward compatibility, it is possible that the user's manifest file has not been compressed.
+/// So when we encounter problems, we need to fall back to `FALL_BACK_COMPRESS_TYPE` for processing.
+const FALL_BACK_COMPRESS_TYPE: CompressionType = CompressionType::Uncompressed;
#[inline]
pub fn delta_file(version: ManifestVersion) -> String {
@@ -47,6 +54,15 @@ pub fn checkpoint_file(version: ManifestVersion) -> String {
format!("{version:020}.checkpoint")
}
+#[inline]
+pub fn gen_path(path: &str, file: &str, compress_type: CompressionType) -> String {
+ if compress_type == CompressionType::Uncompressed {
+ format!("{}{}", path, file)
+ } else {
+ format!("{}{}.{}", path, file, compress_type.file_extension())
+ }
+}
+
/// Return's the file manifest version from path
///
/// # Panics
@@ -57,6 +73,16 @@ pub fn file_version(path: &str) -> ManifestVersion {
s.parse().unwrap_or_else(|_| panic!("Invalid file: {path}"))
}
+/// Returns the file compression algorithm by file extension.
+///
+/// For example, the file
+/// `00000000000000000000.json.gz` -> `CompressionType::Gzip`
+#[inline]
+pub fn file_compress_type(path: &str) -> CompressionType {
+ let s = path.rsplit('.').next().unwrap_or("");
+ CompressionType::from_str(s).unwrap_or(CompressionType::Uncompressed)
+}
+
#[inline]
pub fn is_delta_file(file_name: &str) -> bool {
DELTA_RE.is_match(file_name)
@@ -79,12 +105,20 @@ impl LogIterator for ObjectStoreLogIterator {
async fn next_log(&mut self) -> Result<Option<(ManifestVersion, Vec<u8>)>> {
match self.iter.next() {
Some((v, entry)) => {
+ let compress_type = file_compress_type(entry.name());
let bytes = self
.object_store
.read(entry.path())
.await
.context(ReadObjectSnafu { path: entry.path() })?;
- Ok(Some((v, bytes)))
+ let data = compress_type
+ .decode(bytes)
+ .await
+ .context(DecompressObjectSnafu {
+ compress_type,
+ path: entry.path(),
+ })?;
+ Ok(Some((v, data)))
}
None => Ok(None),
}
@@ -94,6 +128,7 @@ impl LogIterator for ObjectStoreLogIterator {
#[derive(Clone, Debug)]
pub struct ManifestObjectStore {
object_store: ObjectStore,
+ compress_type: CompressionType,
path: String,
}
@@ -101,25 +136,49 @@ impl ManifestObjectStore {
pub fn new(path: &str, object_store: ObjectStore) -> Self {
Self {
object_store,
+ //TODO: make it configurable
+ compress_type: DEFAULT_MANIFEST_COMPRESSION_TYPE,
path: util::normalize_dir(path),
}
}
#[inline]
+ /// Returns the delta file path under the **current** compression algorithm
fn delta_file_path(&self, version: ManifestVersion) -> String {
- format!("{}{}", self.path, delta_file(version))
+ gen_path(&self.path, &delta_file(version), self.compress_type)
}
#[inline]
+ /// Returns the checkpoint file path under the **current** compression algorithm
fn checkpoint_file_path(&self, version: ManifestVersion) -> String {
- format!("{}{}", self.path, checkpoint_file(version))
+ gen_path(&self.path, &checkpoint_file(version), self.compress_type)
}
#[inline]
+ /// Returns the last checkpoint path. The last checkpoint is never compressed,
+ /// so its path is independent of the compression algorithm used by `ManifestObjectStore`.
fn last_checkpoint_path(&self) -> String {
format!("{}{}", self.path, LAST_CHECKPOINT_FILE)
}
+ /// Returns all `R`s in the root directory for which the `filter` closure returns `Some(R)`,
+ /// discarding the entries for which it returns `None`.
+ async fn get_paths<F, R>(&self, filter: F) -> Result<Vec<R>>
+ where
+ F: Fn(Entry) -> Option<R>,
+ {
+ let streamer = self
+ .object_store
+ .list(&self.path)
+ .await
+ .context(ListObjectsSnafu { path: &self.path })?;
+ streamer
+ .try_filter_map(|e| async { Ok(filter(e)) })
+ .try_collect::<Vec<_>>()
+ .await
+ .context(ListObjectsSnafu { path: &self.path })
+ }
+
pub(crate) fn path(&self) -> &str {
&self.path
}
@@ -158,29 +217,18 @@ impl ManifestLogStorage for ManifestObjectStore {
) -> Result<ObjectStoreLogIterator> {
ensure!(start <= end, InvalidScanIndexSnafu { start, end });
- let streamer = self
- .object_store
- .list(&self.path)
- .await
- .context(ListObjectsSnafu { path: &self.path })?;
-
- let mut entries: Vec<(ManifestVersion, Entry)> = streamer
- .try_filter_map(|e| async move {
- let file_name = e.name();
+ let mut entries: Vec<(ManifestVersion, Entry)> = self
+ .get_paths(|entry| {
+ let file_name = entry.name();
if is_delta_file(file_name) {
let version = file_version(file_name);
- if version >= start && version < end {
- Ok(Some((version, e)))
- } else {
- Ok(None)
+ if start <= version && version < end {
+ return Some((version, entry));
}
- } else {
- Ok(None)
}
+ None
})
- .try_collect::<Vec<_>>()
- .await
- .context(ListObjectsSnafu { path: &self.path })?;
+ .await?;
entries.sort_unstable_by(|(v1, _), (v2, _)| v1.cmp(v2));
@@ -195,31 +243,20 @@ impl ManifestLogStorage for ManifestObjectStore {
end: ManifestVersion,
keep_last_checkpoint: bool,
) -> Result<usize> {
- let streamer = self
- .object_store
- .list(&self.path)
- .await
- .context(ListObjectsSnafu { path: &self.path })?;
-
// Stores (entry, is_checkpoint, version) in a Vec.
- let entries: Vec<_> = streamer
- .try_filter_map(|e| async move {
- let file_name = e.name();
+ let entries: Vec<_> = self
+ .get_paths(|entry| {
+ let file_name = entry.name();
let is_checkpoint = is_checkpoint_file(file_name);
if is_delta_file(file_name) || is_checkpoint_file(file_name) {
let version = file_version(file_name);
if version < end {
- Ok(Some((e, is_checkpoint, version)))
- } else {
- Ok(None)
+ return Some((entry, is_checkpoint, version));
}
- } else {
- Ok(None)
}
+ None
})
- .try_collect::<Vec<_>>()
- .await
- .context(ListObjectsSnafu { path: &self.path })?;
+ .await?;
let checkpoint_version = if keep_last_checkpoint {
// Note that the order of entries is unspecific.
entries
@@ -237,7 +274,6 @@ impl ManifestLogStorage for ManifestObjectStore {
} else {
None
};
-
let paths: Vec<_> = entries
.iter()
.filter(|(_e, is_checkpoint, version)| {
@@ -279,19 +315,37 @@ impl ManifestLogStorage for ManifestObjectStore {
async fn save(&self, version: ManifestVersion, bytes: &[u8]) -> Result<()> {
let path = self.delta_file_path(version);
-
logging::debug!("Save log to manifest storage, version: {}", version);
-
+ let data = self
+ .compress_type
+ .encode(bytes)
+ .await
+ .context(CompressObjectSnafu {
+ compress_type: self.compress_type,
+ path: &path,
+ })?;
self.object_store
- .write(&path, bytes.to_vec())
+ .write(&path, data)
.await
.context(WriteObjectSnafu { path })
}
async fn delete(&self, start: ManifestVersion, end: ManifestVersion) -> Result<()> {
- let raw_paths = (start..end)
- .map(|v| self.delta_file_path(v))
- .collect::<Vec<_>>();
+ ensure!(start <= end, InvalidScanIndexSnafu { start, end });
+
+ // For backward compatibility, logs between start and end may not have been compressed,
+ // so we also delete the uncompressed file for each version, even if that file does not exist.
+ let mut paths = Vec::with_capacity(((end - start) * 2) as usize);
+ for version in start..end {
+ paths.push(raw_normalize_path(&self.delta_file_path(version)));
+ if self.compress_type != FALL_BACK_COMPRESS_TYPE {
+ paths.push(raw_normalize_path(&gen_path(
+ &self.path,
+ &delta_file(version),
+ FALL_BACK_COMPRESS_TYPE,
+ )));
+ }
+ }
logging::debug!(
"Deleting logs from manifest storage, start: {}, end: {}",
@@ -299,16 +353,11 @@ impl ManifestLogStorage for ManifestObjectStore {
end
);
- let paths = raw_paths
- .iter()
- .map(|p| raw_normalize_path(p))
- .collect::<Vec<_>>();
-
self.object_store
- .remove(paths)
+ .remove(paths.clone())
.await
.with_context(|_| DeleteObjectSnafu {
- path: raw_paths.join(","),
+ path: paths.join(","),
})?;
Ok(())
@@ -316,11 +365,20 @@ impl ManifestLogStorage for ManifestObjectStore {
async fn save_checkpoint(&self, version: ManifestVersion, bytes: &[u8]) -> Result<()> {
let path = self.checkpoint_file_path(version);
+ let data = self
+ .compress_type
+ .encode(bytes)
+ .await
+ .context(CompressObjectSnafu {
+ compress_type: self.compress_type,
+ path: &path,
+ })?;
self.object_store
- .write(&path, bytes.to_vec())
+ .write(&path, data)
.await
.context(WriteObjectSnafu { path })?;
+ // The last checkpoint file only contains the size and version, which is tiny, so we don't compress it.
let last_checkpoint_path = self.last_checkpoint_path();
let checkpoint_metadata = CheckpointMetadata {
@@ -337,7 +395,6 @@ impl ManifestLogStorage for ManifestObjectStore {
);
let bs = checkpoint_metadata.encode()?;
-
self.object_store
.write(&last_checkpoint_path, bs.as_ref().to_vec())
.await
@@ -353,27 +410,88 @@ impl ManifestLogStorage for ManifestObjectStore {
version: ManifestVersion,
) -> Result<Option<(ManifestVersion, Vec<u8>)>> {
let path = self.checkpoint_file_path(version);
- match self.object_store.read(&path).await {
- Ok(checkpoint) => Ok(Some((version, checkpoint))),
- Err(e) if e.kind() == ErrorKind::NotFound => Ok(None),
- Err(e) => Err(e).context(ReadObjectSnafu { path }),
- }
+ // For backward compatibility, the user's checkpoint may not be compressed,
+ // so if the file is not found under the compressed name, fall back to the uncompressed name and try again.
+ let checkpoint_data =
+ match self.object_store.read(&path).await {
+ Ok(checkpoint) => {
+ let decompress_data = self.compress_type.decode(checkpoint).await.context(
+ DecompressObjectSnafu {
+ compress_type: self.compress_type,
+ path,
+ },
+ )?;
+ Ok(Some(decompress_data))
+ }
+ Err(e) => {
+ if e.kind() == ErrorKind::NotFound {
+ if self.compress_type != FALL_BACK_COMPRESS_TYPE {
+ let fall_back_path = gen_path(
+ &self.path,
+ &checkpoint_file(version),
+ FALL_BACK_COMPRESS_TYPE,
+ );
+ logging::debug!(
+ "Failed to load checkpoint from path: {}, fall back to path: {}",
+ path,
+ fall_back_path
+ );
+ match self.object_store.read(&fall_back_path).await {
+ Ok(checkpoint) => {
+ let decompress_data = FALL_BACK_COMPRESS_TYPE
+ .decode(checkpoint)
+ .await
+ .context(DecompressObjectSnafu {
+ compress_type: FALL_BACK_COMPRESS_TYPE,
+ path,
+ })?;
+ Ok(Some(decompress_data))
+ }
+ Err(e) if e.kind() == ErrorKind::NotFound => Ok(None),
+ Err(e) => Err(e).context(ReadObjectSnafu {
+ path: &fall_back_path,
+ }),
+ }
+ } else {
+ Ok(None)
+ }
+ } else {
+ Err(e).context(ReadObjectSnafu { path: &path })
+ }
+ }
+ }?;
+ Ok(checkpoint_data.map(|data| (version, data)))
}
async fn delete_checkpoint(&self, version: ManifestVersion) -> Result<()> {
- let path = self.checkpoint_file_path(version);
+ // For backward compatibility, the user's checkpoint file may not have been compressed,
+ // so we also delete the uncompressed checkpoint file for that version, even if it does not exist.
+ let paths = if self.compress_type != FALL_BACK_COMPRESS_TYPE {
+ vec![
+ raw_normalize_path(&self.checkpoint_file_path(version)),
+ raw_normalize_path(&gen_path(
+ &self.path,
+ &checkpoint_file(version),
+ FALL_BACK_COMPRESS_TYPE,
+ )),
+ ]
+ } else {
+ vec![raw_normalize_path(&self.checkpoint_file_path(version))]
+ };
+
self.object_store
- .delete(&path)
+ .remove(paths.clone())
.await
- .context(DeleteObjectSnafu { path })?;
+ .context(DeleteObjectSnafu {
+ path: paths.join(","),
+ })?;
Ok(())
}
async fn load_last_checkpoint(&self) -> Result<Option<(ManifestVersion, Vec<u8>)>> {
let last_checkpoint_path = self.last_checkpoint_path();
-
let last_checkpoint_data = match self.object_store.read(&last_checkpoint_path).await {
- Ok(last_checkpoint_data) => last_checkpoint_data,
+ Ok(data) => data,
Err(e) if e.kind() == ErrorKind::NotFound => {
return Ok(None);
}
@@ -404,16 +522,39 @@ mod tests {
use super::*;
- #[tokio::test]
- async fn test_manifest_log_store() {
+ fn new_test_manifest_store() -> ManifestObjectStore {
common_telemetry::init_default_ut_logging();
let tmp_dir = create_temp_dir("test_manifest_log_store");
let mut builder = Fs::default();
builder.root(&tmp_dir.path().to_string_lossy());
let object_store = ObjectStore::new(builder).unwrap().finish();
+ ManifestObjectStore::new("/", object_store)
+ }
+
+ #[test]
+ // This test mainly guards against unintentional future changes that would break backward compatibility.
+ fn test_compress_file_path_generation() {
+ let path = "/foo/bar/";
+ let version: ManifestVersion = 0;
+ let file_path = gen_path(path, &delta_file(version), CompressionType::Gzip);
+ assert_eq!(file_path.as_str(), "/foo/bar/00000000000000000000.json.gz")
+ }
- let log_store = ManifestObjectStore::new("/", object_store);
+ #[tokio::test]
+ async fn test_manifest_log_store_uncompress() {
+ let mut log_store = new_test_manifest_store();
+ log_store.compress_type = CompressionType::Uncompressed;
+ test_manifest_log_store_case(log_store).await;
+ }
+ #[tokio::test]
+ async fn test_manifest_log_store_compress() {
+ let mut log_store = new_test_manifest_store();
+ log_store.compress_type = CompressionType::Gzip;
+ test_manifest_log_store_case(log_store).await;
+ }
+
+ async fn test_manifest_log_store_case(log_store: ManifestObjectStore) {
for v in 0..5 {
log_store
.save(v, format!("hello, {v}").as_bytes())
@@ -477,4 +618,73 @@ mod tests {
let mut it = log_store.scan(0, 11).await.unwrap();
assert!(it.next_log().await.unwrap().is_none());
}
+
+ #[tokio::test]
+ // Tests that ManifestObjectStore can read/delete previously uncompressed data correctly
+ async fn test_compress_backward_compatible() {
+ let mut log_store = new_test_manifest_store();
+
+ // write uncompressed data to simulate previously uncompressed data
+ log_store.compress_type = CompressionType::Uncompressed;
+ for v in 0..5 {
+ log_store
+ .save(v, format!("hello, {v}").as_bytes())
+ .await
+ .unwrap();
+ }
+ log_store
+ .save_checkpoint(5, "checkpoint_uncompressed".as_bytes())
+ .await
+ .unwrap();
+
+ // change compress type
+ log_store.compress_type = CompressionType::Gzip;
+
+ // test that load_last_checkpoint works correctly for previously uncompressed data
+ let (v, checkpoint) = log_store.load_last_checkpoint().await.unwrap().unwrap();
+ assert_eq!(v, 5);
+ assert_eq!(checkpoint, "checkpoint_uncompressed".as_bytes());
+
+ // write compressed data to verify the compression algorithm takes effect
+ for v in 5..10 {
+ log_store
+ .save(v, format!("hello, {v}").as_bytes())
+ .await
+ .unwrap();
+ }
+ log_store
+ .save_checkpoint(10, "checkpoint_compressed".as_bytes())
+ .await
+ .unwrap();
+
+ // test data reading
+ let mut it = log_store.scan(0, 10).await.unwrap();
+ for v in 0..10 {
+ let (version, bytes) = it.next_log().await.unwrap().unwrap();
+ assert_eq!(v, version);
+ assert_eq!(format!("hello, {v}").as_bytes(), bytes);
+ }
+ let (v, checkpoint) = log_store.load_checkpoint(5).await.unwrap().unwrap();
+ assert_eq!(v, 5);
+ assert_eq!(checkpoint, "checkpoint_uncompressed".as_bytes());
+ let (v, checkpoint) = log_store.load_last_checkpoint().await.unwrap().unwrap();
+ assert_eq!(v, 10);
+ assert_eq!(checkpoint, "checkpoint_compressed".as_bytes());
+
+ // Delete previously uncompressed checkpoint
+ log_store.delete_checkpoint(5).await.unwrap();
+ assert!(log_store.load_checkpoint(5).await.unwrap().is_none());
+
+ // Delete [3, 7), which contains uncompressed/compressed data
+ log_store.delete(3, 7).await.unwrap();
+ // [3, 7) deleted
+ let mut it = log_store.scan(3, 7).await.unwrap();
+ assert!(it.next_log().await.unwrap().is_none());
+
+ // Delete until 10, which contains uncompressed/compressed data
+ // logs 0, 1, 2, 7, 8, 9 will be deleted
+ assert_eq!(6, log_store.delete_until(10, false).await.unwrap());
+ let mut it = log_store.scan(0, 10).await.unwrap();
+ assert!(it.next_log().await.unwrap().is_none());
+ }
}
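To make the backward-compatibility mechanism above concrete, here is a minimal sketch of how the path helpers cooperate, assuming `gen_path`, `delta_file`, and `file_compress_type` are in scope as defined in this diff and that `CompressionType::from_str` recognizes the extension strings returned by `file_extension()`:

#[test]
fn test_path_helpers_sketch() {
    // Compressed delta files carry the codec's extension, which file_compress_type can recover.
    let gz = gen_path("/foo/bar/", &delta_file(7), CompressionType::Gzip);
    assert_eq!(gz, "/foo/bar/00000000000000000007.json.gz");
    assert_eq!(file_compress_type(&gz), CompressionType::Gzip);

    // Legacy (uncompressed) files keep the plain name, so the parser falls back to Uncompressed,
    // which is exactly what the FALL_BACK_COMPRESS_TYPE handling relies on.
    let plain = gen_path("/foo/bar/", &delta_file(7), CompressionType::Uncompressed);
    assert_eq!(plain, "/foo/bar/00000000000000000007.json");
    assert_eq!(file_compress_type(&plain), CompressionType::Uncompressed);
}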
|
feat
|
Compress manifest and checkpoint (#1497)
|
6341fb86c727637fad6aad9d1122247479063155
|
2025-02-06 14:59:57
|
Ruihang Xia
|
feat: write memtable in parallel (#5456)
| false
|
diff --git a/src/mito2/src/region/opener.rs b/src/mito2/src/region/opener.rs
index 1071a2ffb263..2992a475ab35 100644
--- a/src/mito2/src/region/opener.rs
+++ b/src/mito2/src/region/opener.rs
@@ -554,7 +554,7 @@ where
// set next_entry_id and write to memtable.
region_write_ctx.set_next_entry_id(last_entry_id + 1);
- region_write_ctx.write_memtable();
+ region_write_ctx.write_memtable().await;
}
// TODO(weny): We need to update `flushed_entry_id` in the region manifest
diff --git a/src/mito2/src/region_write_ctx.rs b/src/mito2/src/region_write_ctx.rs
index 0047822b4d15..2a1f935245ed 100644
--- a/src/mito2/src/region_write_ctx.rs
+++ b/src/mito2/src/region_write_ctx.rs
@@ -16,6 +16,7 @@ use std::mem;
use std::sync::Arc;
use api::v1::{Mutation, OpType, Rows, WalEntry, WriteHint};
+use futures::stream::{FuturesUnordered, StreamExt};
use snafu::ResultExt;
use store_api::logstore::provider::Provider;
use store_api::logstore::LogStore;
@@ -197,23 +198,43 @@ impl RegionWriteCtx {
}
/// Consumes mutations and writes them into mutable memtable.
- pub(crate) fn write_memtable(&mut self) {
+ pub(crate) async fn write_memtable(&mut self) {
debug_assert_eq!(self.notifiers.len(), self.wal_entry.mutations.len());
if self.failed {
return;
}
- let mutable = &self.version.memtables.mutable;
- // Takes mutations from the wal entry.
- let mutations = mem::take(&mut self.wal_entry.mutations);
- for (mutation, notify) in mutations.into_iter().zip(&mut self.notifiers) {
- // Write mutation to the memtable.
- let Some(kvs) = KeyValues::new(&self.version.metadata, mutation) else {
- continue;
- };
- if let Err(e) = mutable.write(&kvs) {
- notify.err = Some(Arc::new(e));
+ let mutable = self.version.memtables.mutable.clone();
+ let mutations = mem::take(&mut self.wal_entry.mutations)
+ .into_iter()
+ .enumerate()
+ .filter_map(|(i, mutation)| {
+ let kvs = KeyValues::new(&self.version.metadata, mutation)?;
+ Some((i, kvs))
+ })
+ .collect::<Vec<_>>();
+
+ if mutations.len() == 1 {
+ if let Err(err) = mutable.write(&mutations[0].1) {
+ self.notifiers[mutations[0].0].err = Some(Arc::new(err));
+ }
+ } else {
+ let mut tasks = FuturesUnordered::new();
+ for (i, kvs) in mutations {
+ let mutable = mutable.clone();
+ // use tokio runtime to schedule tasks.
+ tasks.push(common_runtime::spawn_blocking_global(move || {
+ (i, mutable.write(&kvs))
+ }));
+ }
+
+ while let Some(result) = tasks.next().await {
+ // first unwrap the result from `spawn` above
+ let (i, result) = result.unwrap();
+ if let Err(err) = result {
+ self.notifiers[i].err = Some(Arc::new(err));
+ }
}
}
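Detached from mito2's types, the fan-out/fan-in shape introduced above (one blocking task per mutation, errors mapped back by index) looks roughly like the following sketch; `write_all_parallel` and its payload type are illustrative, and tokio's `spawn_blocking` stands in for `common_runtime::spawn_blocking_global`:

use futures::stream::{FuturesUnordered, StreamExt};

/// Runs one blocking job per item and records failures under the item's original index.
async fn write_all_parallel(items: Vec<Vec<u8>>) -> Vec<Option<String>> {
    let mut errors: Vec<Option<String>> = vec![None; items.len()];
    let mut tasks = FuturesUnordered::new();
    for (i, item) in items.into_iter().enumerate() {
        // Each write is CPU-bound, so it runs on the blocking thread pool.
        tasks.push(tokio::task::spawn_blocking(move || {
            // Pretend an empty payload is a failed write.
            let result = if item.is_empty() { Err("empty payload".to_string()) } else { Ok(()) };
            (i, result)
        }));
    }
    while let Some(joined) = tasks.next().await {
        // First unwrap the JoinHandle result, then record the per-item outcome.
        let (i, result) = joined.expect("blocking task panicked");
        if let Err(e) = result {
            errors[i] = Some(e);
        }
    }
    errors
}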
diff --git a/src/mito2/src/worker/handle_write.rs b/src/mito2/src/worker/handle_write.rs
index 5f9fdd698c79..2804aabf1a24 100644
--- a/src/mito2/src/worker/handle_write.rs
+++ b/src/mito2/src/worker/handle_write.rs
@@ -18,7 +18,7 @@ use std::collections::{hash_map, HashMap};
use std::sync::Arc;
use api::v1::OpType;
-use common_telemetry::debug;
+use common_telemetry::{debug, error};
use snafu::ensure;
use store_api::codec::PrimaryKeyEncoding;
use store_api::logstore::LogStore;
@@ -105,10 +105,35 @@ impl<S: LogStore> RegionWorkerLoop<S> {
let _timer = WRITE_STAGE_ELAPSED
.with_label_values(&["write_memtable"])
.start_timer();
- for mut region_ctx in region_ctxs.into_values() {
- region_ctx.write_memtable();
+ if region_ctxs.len() == 1 {
+ // fast path for single region.
+ let mut region_ctx = region_ctxs.into_values().next().unwrap();
+ region_ctx.write_memtable().await;
put_rows += region_ctx.put_num;
delete_rows += region_ctx.delete_num;
+ } else {
+ let region_write_task = region_ctxs
+ .into_values()
+ .map(|mut region_ctx| {
+ // use tokio runtime to schedule tasks.
+ common_runtime::spawn_global(async move {
+ region_ctx.write_memtable().await;
+ (region_ctx.put_num, region_ctx.delete_num)
+ })
+ })
+ .collect::<Vec<_>>();
+
+ for result in futures::future::join_all(region_write_task).await {
+ match result {
+ Ok((put, delete)) => {
+ put_rows += put;
+ delete_rows += delete;
+ }
+ Err(e) => {
+ error!(e; "unexpected error when joining region write tasks");
+ }
+ }
+ }
}
}
WRITE_ROWS_TOTAL
|
feat
|
write memtable in parallel (#5456)
|
9e6301819842445a751b38f8fc4612b5fae2426f
|
2025-03-18 06:48:56
|
Ning Sun
|
feat: disable http timeout (#5721)
| false
|
diff --git a/config/config.md b/config/config.md
index 9b336f44ea6d..2953fdb00732 100644
--- a/config/config.md
+++ b/config/config.md
@@ -24,7 +24,7 @@
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
-| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
+| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
@@ -222,7 +222,7 @@
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
-| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
+| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
@@ -390,7 +390,7 @@
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
-| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
+| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
@@ -563,7 +563,7 @@
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
-| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
+| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index c675ea26aa55..392950adfed1 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -27,7 +27,7 @@ max_concurrent_queries = 0
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
-timeout = "30s"
+timeout = "0s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
diff --git a/config/flownode.example.toml b/config/flownode.example.toml
index 803cb7ae0840..4277f9c0059d 100644
--- a/config/flownode.example.toml
+++ b/config/flownode.example.toml
@@ -30,7 +30,7 @@ max_send_message_size = "512MB"
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
-timeout = "30s"
+timeout = "0s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
diff --git a/config/frontend.example.toml b/config/frontend.example.toml
index 46acaafd1998..3d4cd781441a 100644
--- a/config/frontend.example.toml
+++ b/config/frontend.example.toml
@@ -26,7 +26,7 @@ retry_interval = "3s"
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
-timeout = "30s"
+timeout = "0s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index 50b648d2dd53..0d4285d9ec63 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -34,7 +34,7 @@ max_concurrent_queries = 0
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
-timeout = "30s"
+timeout = "0s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index c3bb116f5db0..1fdabbf44260 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -440,7 +440,7 @@ mod tests {
[http]
addr = "127.0.0.1:4000"
- timeout = "30s"
+ timeout = "0s"
body_limit = "2GB"
[opentsdb]
@@ -461,7 +461,7 @@ mod tests {
let fe_opts = command.load_options(&Default::default()).unwrap().component;
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
- assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
+ assert_eq!(Duration::from_secs(0), fe_opts.http.timeout);
assert_eq!(ReadableSize::gb(2), fe_opts.http.body_limit);
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index 6461bcfb5526..2251f75eca6f 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -154,7 +154,7 @@ impl Default for HttpOptions {
fn default() -> Self {
Self {
addr: "127.0.0.1:4000".to_string(),
- timeout: Duration::from_secs(30),
+ timeout: Duration::from_secs(0),
disable_dashboard: false,
body_limit: DEFAULT_BODY_LIMIT,
is_strict_mode: false,
@@ -1384,7 +1384,7 @@ mod test {
fn test_http_options_default() {
let default = HttpOptions::default();
assert_eq!("127.0.0.1:4000".to_string(), default.addr);
- assert_eq!(Duration::from_secs(30), default.timeout)
+ assert_eq!(Duration::from_secs(0), default.timeout)
}
#[tokio::test]
diff --git a/src/servers/tests/http/http_handler_test.rs b/src/servers/tests/http/http_handler_test.rs
index 944b03dab9a8..31e95c1a7b95 100644
--- a/src/servers/tests/http/http_handler_test.rs
+++ b/src/servers/tests/http/http_handler_test.rs
@@ -386,7 +386,7 @@ async fn test_config() {
[http]
addr = "127.0.0.1:4000"
- timeout = "30s"
+ timeout = "0s"
body_limit = "2GB"
[logging]
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index eefe26f512df..c48547b25c8b 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -943,7 +943,7 @@ init_regions_parallelism = 16
[http]
addr = "127.0.0.1:4000"
-timeout = "30s"
+timeout = "0s"
body_limit = "64MiB"
is_strict_mode = false
cors_allowed_origins = []
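The tables and example files above encode the convention that a zero duration means no timeout at all. A minimal sketch of that convention, independent of how the servers crate actually installs its middleware (the helper name is illustrative):

use std::time::Duration;

/// Returns None when the configured timeout is zero, signalling that no
/// timeout middleware should be installed for the HTTP server.
fn effective_timeout(configured: Duration) -> Option<Duration> {
    if configured.is_zero() {
        None
    } else {
        Some(configured)
    }
}

// effective_timeout(Duration::from_secs(0)) == None, matching the new `timeout = "0s"` default.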
|
feat
|
disable http timeout (#5721)
|
0a4444a43a7aa7132eb4a5b2efc0b0a315925a2b
|
2024-03-11 17:04:50
|
Weny Xu
|
feat(fuzz): validate columns (#3485)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index c2e8b8b9f29e..3ec7ea27d9c0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4815,9 +4815,9 @@ dependencies = [
[[package]]
name = "libc"
-version = "0.2.151"
+version = "0.2.153"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4"
+checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
[[package]]
name = "libfuzzer-sys"
diff --git a/tests-fuzz/src/error.rs b/tests-fuzz/src/error.rs
index 9cf7728b81d2..def6414cb13b 100644
--- a/tests-fuzz/src/error.rs
+++ b/tests-fuzz/src/error.rs
@@ -46,4 +46,7 @@ pub enum Error {
error: sqlx::error::Error,
location: Location,
},
+
+ #[snafu(display("Failed to assert: {}", reason))]
+ Assert { reason: String, location: Location },
}
diff --git a/tests-fuzz/src/generator/create_expr.rs b/tests-fuzz/src/generator/create_expr.rs
index 534fac827bd2..3aeb03b9544c 100644
--- a/tests-fuzz/src/generator/create_expr.rs
+++ b/tests-fuzz/src/generator/create_expr.rs
@@ -189,10 +189,19 @@ impl<R: Rng + 'static> Generator<CreateTableExpr, R> for CreateTableExprGenerato
#[cfg(test)]
mod tests {
+ use datatypes::value::Value;
use rand::SeedableRng;
use super::*;
+ #[test]
+ fn test_float64() {
+ let value = Value::from(0.047318541668048164);
+ assert_eq!("0.047318541668048164", value.to_string());
+ let value: f64 = "0.047318541668048164".parse().unwrap();
+ assert_eq!("0.047318541668048164", value.to_string());
+ }
+
#[test]
fn test_create_table_expr_generator() {
let mut rng = rand::thread_rng();
diff --git a/tests-fuzz/src/lib.rs b/tests-fuzz/src/lib.rs
index 2666a35051c1..406927d6b46b 100644
--- a/tests-fuzz/src/lib.rs
+++ b/tests-fuzz/src/lib.rs
@@ -22,6 +22,7 @@ pub mod generator;
pub mod ir;
pub mod translator;
pub mod utils;
+pub mod validator;
#[cfg(test)]
pub mod test_utils;
diff --git a/tests-fuzz/src/validator.rs b/tests-fuzz/src/validator.rs
new file mode 100644
index 000000000000..198d009a152b
--- /dev/null
+++ b/tests-fuzz/src/validator.rs
@@ -0,0 +1,15 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod column;
diff --git a/tests-fuzz/src/validator/column.rs b/tests-fuzz/src/validator/column.rs
new file mode 100644
index 000000000000..5b148f7f02bf
--- /dev/null
+++ b/tests-fuzz/src/validator/column.rs
@@ -0,0 +1,240 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_telemetry::debug;
+use datatypes::data_type::DataType;
+use snafu::{ensure, ResultExt};
+use sqlx::database::HasArguments;
+use sqlx::{ColumnIndex, Database, Decode, Encode, Executor, IntoArguments, Type};
+
+use crate::error::{self, Result};
+use crate::ir::create_expr::ColumnOption;
+use crate::ir::{Column, Ident};
+
+#[derive(Debug, sqlx::FromRow)]
+pub struct ColumnEntry {
+ pub table_schema: String,
+ pub table_name: String,
+ pub column_name: String,
+ pub data_type: String,
+ pub semantic_type: String,
+ pub column_default: Option<String>,
+ pub is_nullable: String,
+}
+
+fn is_nullable(str: &str) -> bool {
+ str.to_uppercase() == "YES"
+}
+
+impl PartialEq<Column> for ColumnEntry {
+ fn eq(&self, other: &Column) -> bool {
+ // Checks `table_name`
+ if other.name.value != self.column_name {
+ debug!(
+ "expected name: {}, got: {}",
+ other.name.value, self.column_name
+ );
+ return false;
+ }
+ // Checks `data_type`
+ if other.column_type.name() != self.data_type {
+ debug!(
+ "expected column_type: {}, got: {}",
+ other.column_type.name(),
+ self.data_type
+ );
+ return false;
+ }
+ // Checks `column_default`
+ match &self.column_default {
+ Some(value) => {
+ let default_value_opt = other.options.iter().find(|opt| {
+ matches!(
+ opt,
+ ColumnOption::DefaultFn(_) | ColumnOption::DefaultValue(_)
+ )
+ });
+ if default_value_opt.is_none() {
+ debug!("default value options is not found");
+ return false;
+ }
+ let default_value = match default_value_opt.unwrap() {
+ ColumnOption::DefaultValue(v) => v.to_string(),
+ ColumnOption::DefaultFn(f) => f.to_string(),
+ _ => unreachable!(),
+ };
+ if &default_value != value {
+ debug!("expected default value: {default_value}, got: {value}");
+ return false;
+ }
+ }
+ None => {
+ if other.options.iter().any(|opt| {
+ matches!(
+ opt,
+ ColumnOption::DefaultFn(_) | ColumnOption::DefaultValue(_)
+ )
+ }) {
+ return false;
+ }
+ }
+ };
+ // Checks `is_nullable`
+ if is_nullable(&self.is_nullable) {
+ // Null is the default value. Therefore, we only ensure there is no `ColumnOption::NotNull` option.
+ if other
+ .options
+ .iter()
+ .any(|opt| matches!(opt, ColumnOption::NotNull))
+ {
+ debug!("ColumnOption::NotNull is not found");
+ return false;
+ }
+ } else {
+ // `ColumnOption::TimeIndex` implies the field is not nullable.
+ if !other
+ .options
+ .iter()
+ .any(|opt| matches!(opt, ColumnOption::NotNull | ColumnOption::TimeIndex))
+ {
+ debug!("unexpected ColumnOption::NotNull or ColumnOption::TimeIndex");
+ return false;
+ }
+ }
+ // TODO: Check `semantic_type`
+
+ true
+ }
+}
+
+/// Asserts [&[ColumnEntry]] is equal to [&[Column]]
+pub fn assert_eq(fetched_columns: &[ColumnEntry], columns: &[Column]) -> Result<()> {
+ ensure!(
+ columns.len() == fetched_columns.len(),
+ error::AssertSnafu {
+ reason: format!(
+ "Expected columns length: {}, got: {}",
+ columns.len(),
+ fetched_columns.len(),
+ )
+ }
+ );
+
+ for (idx, fetched) in fetched_columns.iter().enumerate() {
+ ensure!(
+ fetched == &columns[idx],
+ error::AssertSnafu {
+ reason: format!(
+ "ColumnEntry {fetched:?} is not equal to Column {:?}",
+ columns[idx]
+ )
+ }
+ );
+ }
+
+ Ok(())
+}
+
+/// Returns all [ColumnEntry] of the `table_name` from `information_schema`.
+pub async fn fetch_columns<'a, DB, E>(
+ e: E,
+ schema_name: Ident,
+ table_name: Ident,
+) -> Result<Vec<ColumnEntry>>
+where
+ DB: Database,
+ <DB as HasArguments<'a>>::Arguments: IntoArguments<'a, DB>,
+ for<'c> E: 'a + Executor<'c, Database = DB>,
+ for<'c> String: Decode<'c, DB> + Type<DB>,
+ for<'c> String: Encode<'c, DB> + Type<DB>,
+ for<'c> &'c str: ColumnIndex<<DB as Database>::Row>,
+{
+ let sql = "SELECT * FROM information_schema.columns WHERE table_schema = ? AND table_name = ?";
+ sqlx::query_as::<_, ColumnEntry>(sql)
+ .bind(schema_name.value.to_string())
+ .bind(table_name.value.to_string())
+ .fetch_all(e)
+ .await
+ .context(error::ExecuteQuerySnafu { sql })
+}
+
+#[cfg(test)]
+mod tests {
+ use datatypes::data_type::{ConcreteDataType, DataType};
+ use datatypes::value::Value;
+
+ use super::ColumnEntry;
+ use crate::ir::create_expr::ColumnOption;
+ use crate::ir::{Column, Ident};
+
+ #[test]
+ fn test_column_eq() {
+ common_telemetry::init_default_ut_logging();
+ let column_entry = ColumnEntry {
+ table_schema: String::new(),
+ table_name: String::new(),
+ column_name: "test".to_string(),
+ data_type: ConcreteDataType::int8_datatype().name(),
+ semantic_type: String::new(),
+ column_default: None,
+ is_nullable: "Yes".to_string(),
+ };
+ // Naive
+ let column = Column {
+ name: Ident::new("test"),
+ column_type: ConcreteDataType::int8_datatype(),
+ options: vec![],
+ };
+ assert!(column_entry == column);
+ // With quote
+ let column = Column {
+ name: Ident::with_quote('\'', "test"),
+ column_type: ConcreteDataType::int8_datatype(),
+ options: vec![],
+ };
+ assert!(column_entry == column);
+ // With default value
+ let column_entry = ColumnEntry {
+ table_schema: String::new(),
+ table_name: String::new(),
+ column_name: "test".to_string(),
+ data_type: ConcreteDataType::int8_datatype().to_string(),
+ semantic_type: String::new(),
+ column_default: Some("1".to_string()),
+ is_nullable: "Yes".to_string(),
+ };
+ let column = Column {
+ name: Ident::with_quote('\'', "test"),
+ column_type: ConcreteDataType::int8_datatype(),
+ options: vec![ColumnOption::DefaultValue(Value::from(1))],
+ };
+ assert!(column_entry == column);
+ // With default function
+ let column_entry = ColumnEntry {
+ table_schema: String::new(),
+ table_name: String::new(),
+ column_name: "test".to_string(),
+ data_type: ConcreteDataType::int8_datatype().to_string(),
+ semantic_type: String::new(),
+ column_default: Some("Hello()".to_string()),
+ is_nullable: "Yes".to_string(),
+ };
+ let column = Column {
+ name: Ident::with_quote('\'', "test"),
+ column_type: ConcreteDataType::int8_datatype(),
+ options: vec![ColumnOption::DefaultFn("Hello()".to_string())],
+ };
+ assert!(column_entry == column);
+ }
+}
diff --git a/tests-fuzz/targets/fuzz_create_table.rs b/tests-fuzz/targets/fuzz_create_table.rs
index f3e3cdd7f252..7af489b1c2e3 100644
--- a/tests-fuzz/targets/fuzz_create_table.rs
+++ b/tests-fuzz/targets/fuzz_create_table.rs
@@ -32,6 +32,8 @@ use tests_fuzz::ir::CreateTableExpr;
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::DslTranslator;
use tests_fuzz::utils::{init_greptime_connections, Connections};
+use tests_fuzz::validator;
+use tests_fuzz::validator::column::fetch_columns;
struct FuzzContext {
greptime: Pool<MySql>,
@@ -52,7 +54,8 @@ struct FuzzInput {
impl Arbitrary<'_> for FuzzInput {
fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
- let columns = u.int_in_range(2..=10)?;
+ let mut rng = ChaChaRng::seed_from_u64(seed);
+ let columns = rng.gen_range(2..30);
Ok(FuzzInput { columns, seed })
}
}
@@ -64,7 +67,7 @@ fn generate_expr(input: FuzzInput) -> Result<CreateTableExpr> {
WordGenerator,
merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
)))
- .columns(rng.gen_range(1..input.columns))
+ .columns(input.columns)
.engine("mito")
.build()
.unwrap();
@@ -82,6 +85,14 @@ async fn execute_create_table(ctx: FuzzContext, input: FuzzInput) -> Result<()>
.context(error::ExecuteQuerySnafu { sql: &sql })?;
info!("Create table: {sql}, result: {result:?}");
+ // Validate columns
+ let mut column_entries =
+ fetch_columns(&ctx.greptime, "public".into(), expr.table_name.clone()).await?;
+ column_entries.sort_by(|a, b| a.column_name.cmp(&b.column_name));
+ let mut columns = expr.columns.clone();
+ columns.sort_by(|a, b| a.name.value.cmp(&b.name.value));
+ validator::column::assert_eq(&column_entries, &columns)?;
+
// Cleans up
let sql = format!("DROP TABLE {}", expr.table_name);
let result = sqlx::query(&sql)
|
feat
|
validate columns (#3485)
|
3dcd6b8e51a8a39af675de020dca90b54bfcbfc7
|
2024-11-05 08:21:32
|
Lei, HUANG
|
fix: database base ttl (#4926)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index a1e82d45e116..aeacc90ba398 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6556,6 +6556,7 @@ dependencies = [
"common-error",
"common-function",
"common-macro",
+ "common-meta",
"common-procedure-test",
"common-query",
"common-recordbatch",
diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs
index 39ca065393fc..6f63356540a0 100644
--- a/src/common/meta/src/key.rs
+++ b/src/common/meta/src/key.rs
@@ -91,6 +91,7 @@ pub mod catalog_name;
pub mod datanode_table;
pub mod flow;
pub mod node_address;
+mod schema_metadata_manager;
pub mod schema_name;
pub mod table_info;
pub mod table_name;
@@ -116,6 +117,7 @@ use flow::flow_route::FlowRouteValue;
use flow::table_flow::TableFlowValue;
use lazy_static::lazy_static;
use regex::Regex;
+pub use schema_metadata_manager::{SchemaMetadataManager, SchemaMetadataManagerRef};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
diff --git a/src/common/meta/src/key/schema_metadata_manager.rs b/src/common/meta/src/key/schema_metadata_manager.rs
new file mode 100644
index 000000000000..6ee3a3112d5d
--- /dev/null
+++ b/src/common/meta/src/key/schema_metadata_manager.rs
@@ -0,0 +1,122 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Schema-level metadata manager.
+
+use std::sync::Arc;
+
+use snafu::OptionExt;
+use store_api::storage::TableId;
+
+use crate::error::TableInfoNotFoundSnafu;
+use crate::key::schema_name::{SchemaManager, SchemaNameKey};
+use crate::key::table_info::{TableInfoManager, TableInfoManagerRef};
+use crate::kv_backend::KvBackendRef;
+use crate::{error, SchemaOptions};
+
+pub type SchemaMetadataManagerRef = Arc<SchemaMetadataManager>;
+
+pub struct SchemaMetadataManager {
+ table_info_manager: TableInfoManagerRef,
+ schema_manager: SchemaManager,
+ #[cfg(any(test, feature = "testing"))]
+ kv_backend: KvBackendRef,
+}
+
+impl SchemaMetadataManager {
+ /// Creates a new schema metadata manager.
+ #[cfg(not(any(test, feature = "testing")))]
+ pub fn new(kv_backend: KvBackendRef) -> Self {
+ let table_info_manager = Arc::new(TableInfoManager::new(kv_backend.clone()));
+ let schema_manager = SchemaManager::new(kv_backend);
+ Self {
+ table_info_manager,
+ schema_manager,
+ }
+ }
+
+ /// Creates a new schema metadata manager (test build, keeps the kv backend for test helpers).
+ #[cfg(any(test, feature = "testing"))]
+ pub fn new(kv_backend: KvBackendRef) -> Self {
+ let table_info_manager = Arc::new(TableInfoManager::new(kv_backend.clone()));
+ let schema_manager = SchemaManager::new(kv_backend.clone());
+ Self {
+ table_info_manager,
+ schema_manager,
+ kv_backend,
+ }
+ }
+
+ /// Gets schema options by table id.
+ pub async fn get_schema_options_by_table_id(
+ &self,
+ table_id: TableId,
+ ) -> error::Result<Option<SchemaOptions>> {
+ let table_info = self
+ .table_info_manager
+ .get(table_id)
+ .await?
+ .with_context(|| TableInfoNotFoundSnafu {
+ table: format!("table id: {}", table_id),
+ })?;
+
+ let key = SchemaNameKey::new(
+ &table_info.table_info.catalog_name,
+ &table_info.table_info.schema_name,
+ );
+ self.schema_manager.get(key).await
+ }
+
+ #[cfg(any(test, feature = "testing"))]
+ pub async fn register_region_table_info(
+ &self,
+ table_id: TableId,
+ table_name: &str,
+ schema_name: &str,
+ catalog_name: &str,
+ schema_value: Option<crate::key::schema_name::SchemaNameValue>,
+ ) {
+ use table::metadata::{RawTableInfo, TableType};
+ let value = crate::key::table_info::TableInfoValue::new(RawTableInfo {
+ ident: Default::default(),
+ name: table_name.to_string(),
+ desc: None,
+ catalog_name: catalog_name.to_string(),
+ schema_name: schema_name.to_string(),
+ meta: Default::default(),
+ table_type: TableType::Base,
+ });
+ let (txn, _) = self
+ .table_info_manager
+ .build_create_txn(table_id, &value)
+ .unwrap();
+ let resp = self.kv_backend.txn(txn).await.unwrap();
+ assert!(resp.succeeded, "Failed to create table metadata");
+ let key = SchemaNameKey {
+ catalog: catalog_name,
+ schema: schema_name,
+ };
+ self.schema_manager
+ .create(key, schema_value, false)
+ .await
+ .expect("Failed to create schema metadata");
+ common_telemetry::info!(
+ "Register table: {}, id: {}, schema: {}, catalog: {}",
+ table_name,
+ table_id,
+ schema_name,
+ catalog_name
+ );
+ }
+}
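A minimal usage sketch of the new manager, assuming the test-only `register_region_table_info` helper (behind the `testing` feature) and the in-memory kv backend; the table id, names, and function name are illustrative:

async fn schema_metadata_sketch() -> common_meta::error::Result<()> {
    use std::sync::Arc;
    use common_meta::key::SchemaMetadataManager;
    use common_meta::kv_backend::memory::MemoryKvBackend;
    use common_meta::kv_backend::KvBackendRef;

    let manager = SchemaMetadataManager::new(Arc::new(MemoryKvBackend::new()) as KvBackendRef);
    // Register table 1024 under greptime/public without explicit schema options.
    manager
        .register_region_table_info(1024, "metrics", "public", "greptime", None)
        .await;
    // No schema-level options were set, so there is no database TTL to inherit.
    let options = manager.get_schema_options_by_table_id(1024).await?;
    assert!(options.and_then(|o| o.ttl).is_none());
    Ok(())
}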
diff --git a/src/common/meta/src/key/table_info.rs b/src/common/meta/src/key/table_info.rs
index 615043f85326..3ea40d4f1cad 100644
--- a/src/common/meta/src/key/table_info.rs
+++ b/src/common/meta/src/key/table_info.rs
@@ -134,6 +134,7 @@ impl TableInfoValue {
}
pub type TableInfoManagerRef = Arc<TableInfoManager>;
+
#[derive(Clone)]
pub struct TableInfoManager {
kv_backend: KvBackendRef,
diff --git a/src/common/meta/src/lib.rs b/src/common/meta/src/lib.rs
index 158350bc32de..c00fd3383042 100644
--- a/src/common/meta/src/lib.rs
+++ b/src/common/meta/src/lib.rs
@@ -54,4 +54,7 @@ pub type DatanodeId = u64;
// The id of the flownode.
pub type FlownodeId = u64;
+/// Schema options.
+pub type SchemaOptions = key::schema_name::SchemaNameValue;
+
pub use instruction::RegionIdent;
diff --git a/src/datanode/src/alive_keeper.rs b/src/datanode/src/alive_keeper.rs
index a0ea2c0188bb..bf9cb16f6b67 100644
--- a/src/datanode/src/alive_keeper.rs
+++ b/src/datanode/src/alive_keeper.rs
@@ -427,7 +427,8 @@ mod test {
common_telemetry::init_default_ut_logging();
let mut region_server = mock_region_server();
let mut engine_env = TestEnv::with_prefix("region-alive-keeper");
- let engine = Arc::new(engine_env.create_engine(MitoConfig::default()).await);
+ let engine = engine_env.create_engine(MitoConfig::default()).await;
+ let engine = Arc::new(engine);
region_server.register_engine(engine.clone());
let alive_keeper = Arc::new(RegionAliveKeeper::new(region_server.clone(), 100));
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 128a60ab9b7b..e679678745e9 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -23,6 +23,7 @@ use common_base::Plugins;
use common_error::ext::BoxedError;
use common_greptimedb_telemetry::GreptimeDBTelemetryTask;
use common_meta::key::datanode_table::{DatanodeTableManager, DatanodeTableValue};
+use common_meta::key::{SchemaMetadataManager, SchemaMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::wal_options_allocator::prepare_wal_options;
pub use common_procedure::options::ProcedureConfig;
@@ -207,7 +208,10 @@ impl DatanodeBuilder {
(Box::new(NoopRegionServerEventListener) as _, None)
};
- let region_server = self.new_region_server(region_event_listener).await?;
+ let schema_metadata_manager = Arc::new(SchemaMetadataManager::new(kv_backend.clone()));
+ let region_server = self
+ .new_region_server(schema_metadata_manager, region_event_listener)
+ .await?;
let datanode_table_manager = DatanodeTableManager::new(kv_backend.clone());
let table_values = datanode_table_manager
@@ -312,6 +316,7 @@ impl DatanodeBuilder {
async fn new_region_server(
&self,
+ schema_metadata_manager: SchemaMetadataManagerRef,
event_listener: RegionServerEventListenerRef,
) -> Result<RegionServer> {
let opts: &DatanodeOptions = &self.opts;
@@ -340,8 +345,13 @@ impl DatanodeBuilder {
);
let object_store_manager = Self::build_object_store_manager(&opts.storage).await?;
- let engines =
- Self::build_store_engines(opts, object_store_manager, self.plugins.clone()).await?;
+ let engines = Self::build_store_engines(
+ opts,
+ object_store_manager,
+ schema_metadata_manager,
+ self.plugins.clone(),
+ )
+ .await?;
for engine in engines {
region_server.register_engine(engine);
}
@@ -355,6 +365,7 @@ impl DatanodeBuilder {
async fn build_store_engines(
opts: &DatanodeOptions,
object_store_manager: ObjectStoreManagerRef,
+ schema_metadata_manager: SchemaMetadataManagerRef,
plugins: Plugins,
) -> Result<Vec<RegionEngineRef>> {
let mut engines = vec![];
@@ -365,6 +376,7 @@ impl DatanodeBuilder {
opts,
object_store_manager.clone(),
config.clone(),
+ schema_metadata_manager.clone(),
plugins.clone(),
)
.await?;
@@ -390,6 +402,7 @@ impl DatanodeBuilder {
opts: &DatanodeOptions,
object_store_manager: ObjectStoreManagerRef,
config: MitoConfig,
+ schema_metadata_manager: SchemaMetadataManagerRef,
plugins: Plugins,
) -> Result<MitoEngine> {
let mito_engine = match &opts.wal {
@@ -399,6 +412,7 @@ impl DatanodeBuilder {
Self::build_raft_engine_log_store(&opts.storage.data_home, raft_engine_config)
.await?,
object_store_manager,
+ schema_metadata_manager,
plugins,
)
.await
@@ -429,6 +443,7 @@ impl DatanodeBuilder {
config,
Self::build_kafka_log_store(kafka_config, global_index_collector).await?,
object_store_manager,
+ schema_metadata_manager,
plugins,
)
.await
diff --git a/src/mito2/Cargo.toml b/src/mito2/Cargo.toml
index d4646006d6a5..eedf6ae636d8 100644
--- a/src/mito2/Cargo.toml
+++ b/src/mito2/Cargo.toml
@@ -24,6 +24,7 @@ common-datasource.workspace = true
common-decimal.workspace = true
common-error.workspace = true
common-macro.workspace = true
+common-meta.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
common-runtime.workspace = true
@@ -74,6 +75,7 @@ uuid.workspace = true
[dev-dependencies]
common-function.workspace = true
+common-meta = { workspace = true, features = ["testing"] }
common-procedure-test.workspace = true
common-test-util.workspace = true
criterion = "0.4"
diff --git a/src/mito2/src/compaction.rs b/src/mito2/src/compaction.rs
index 3cea492071c7..2c2a8f092af8 100644
--- a/src/mito2/src/compaction.rs
+++ b/src/mito2/src/compaction.rs
@@ -28,7 +28,8 @@ use std::time::{Duration, Instant};
use api::v1::region::compact_request;
use common_base::Plugins;
-use common_telemetry::{debug, error, info};
+use common_meta::key::SchemaMetadataManagerRef;
+use common_telemetry::{debug, error, info, warn};
use common_time::range::TimestampRange;
use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
@@ -37,7 +38,7 @@ use datafusion_expr::Expr;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use store_api::metadata::RegionMetadataRef;
-use store_api::storage::RegionId;
+use store_api::storage::{RegionId, TableId};
use table::predicate::Predicate;
use tokio::sync::mpsc::{self, Sender};
@@ -48,8 +49,8 @@ use crate::compaction::picker::{new_picker, CompactionTask};
use crate::compaction::task::CompactionTaskImpl;
use crate::config::MitoConfig;
use crate::error::{
- CompactRegionSnafu, Error, RegionClosedSnafu, RegionDroppedSnafu, RegionTruncatedSnafu,
- RemoteCompactionSnafu, Result, TimeRangePredicateOverflowSnafu,
+ CompactRegionSnafu, Error, GetSchemaMetadataSnafu, RegionClosedSnafu, RegionDroppedSnafu,
+ RegionTruncatedSnafu, RemoteCompactionSnafu, Result, TimeRangePredicateOverflowSnafu,
};
use crate::metrics::COMPACTION_STAGE_ELAPSED;
use crate::read::projection::ProjectionMapper;
@@ -82,6 +83,7 @@ pub struct CompactionRequest {
pub(crate) cache_manager: CacheManagerRef,
pub(crate) manifest_ctx: ManifestContextRef,
pub(crate) listener: WorkerListener,
+ pub(crate) schema_metadata_manager: SchemaMetadataManagerRef,
}
impl CompactionRequest {
@@ -141,6 +143,7 @@ impl CompactionScheduler {
access_layer: &AccessLayerRef,
waiter: OptionOutputTx,
manifest_ctx: &ManifestContextRef,
+ schema_metadata_manager: SchemaMetadataManagerRef,
) -> Result<()> {
if let Some(status) = self.region_status.get_mut(®ion_id) {
// Region is compacting. Add the waiter to pending list.
@@ -158,6 +161,7 @@ impl CompactionScheduler {
self.cache_manager.clone(),
manifest_ctx,
self.listener.clone(),
+ schema_metadata_manager,
);
self.region_status.insert(region_id, status);
let result = self
@@ -173,6 +177,7 @@ impl CompactionScheduler {
&mut self,
region_id: RegionId,
manifest_ctx: &ManifestContextRef,
+ schema_metadata_manager: SchemaMetadataManagerRef,
) {
let Some(status) = self.region_status.get_mut(®ion_id) else {
return;
@@ -186,6 +191,7 @@ impl CompactionScheduler {
self.cache_manager.clone(),
manifest_ctx,
self.listener.clone(),
+ schema_metadata_manager,
);
// Try to schedule next compaction task for this region.
if let Err(e) = self
@@ -256,10 +262,23 @@ impl CompactionScheduler {
cache_manager,
manifest_ctx,
listener,
+ schema_metadata_manager,
} = request;
+
+ let ttl = find_ttl(
+ region_id.table_id(),
+ current_version.options.ttl,
+ &schema_metadata_manager,
+ )
+ .await
+ .unwrap_or_else(|e| {
+ warn!(e; "Failed to get ttl for region: {}", region_id);
+ None
+ });
+
debug!(
- "Pick compaction strategy {:?} for region: {}",
- picker, region_id
+ "Pick compaction strategy {:?} for region: {}, ttl: {:?}",
+ picker, region_id, ttl
);
let compaction_region = CompactionRegion {
@@ -273,6 +292,7 @@ impl CompactionScheduler {
access_layer: access_layer.clone(),
manifest_ctx: manifest_ctx.clone(),
file_purger: None,
+ ttl,
};
let picker_output = {
@@ -414,6 +434,24 @@ impl PendingCompaction {
}
}
+/// Finds the TTL of a table by first examining the table options, then the database options.
+async fn find_ttl(
+ table_id: TableId,
+ table_ttl: Option<Duration>,
+ schema_metadata_manager: &SchemaMetadataManagerRef,
+) -> Result<Option<Duration>> {
+ if let Some(table_ttl) = table_ttl {
+ return Ok(Some(table_ttl));
+ }
+
+ let ttl = schema_metadata_manager
+ .get_schema_options_by_table_id(table_id)
+ .await
+ .context(GetSchemaMetadataSnafu)?
+ .and_then(|options| options.ttl);
+ Ok(ttl)
+}
+
/// Status of running and pending region compaction tasks.
struct CompactionStatus {
/// Id of the region.
@@ -471,6 +509,7 @@ impl CompactionStatus {
cache_manager: CacheManagerRef,
manifest_ctx: &ManifestContextRef,
listener: WorkerListener,
+ schema_metadata_manager: SchemaMetadataManagerRef,
) -> CompactionRequest {
let current_version = self.version_control.current().version;
let start_time = Instant::now();
@@ -484,6 +523,7 @@ impl CompactionStatus {
cache_manager,
manifest_ctx: manifest_ctx.clone(),
listener,
+ schema_metadata_manager,
};
if let Some(pending) = self.pending_compaction.take() {
@@ -639,6 +679,9 @@ fn get_expired_ssts(
#[cfg(test)]
mod tests {
+ use common_meta::key::SchemaMetadataManager;
+ use common_meta::kv_backend::memory::MemoryKvBackend;
+ use common_meta::kv_backend::KvBackendRef;
use tokio::sync::oneshot;
use super::*;
@@ -651,7 +694,19 @@ mod tests {
let (tx, _rx) = mpsc::channel(4);
let mut scheduler = env.mock_compaction_scheduler(tx);
let mut builder = VersionControlBuilder::new();
-
+ let schema_metadata_manager = Arc::new(SchemaMetadataManager::new(Arc::new(
+ MemoryKvBackend::new(),
+ )
+ as KvBackendRef));
+ schema_metadata_manager
+ .register_region_table_info(
+ builder.region_id().table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
// Nothing to compact.
let version_control = Arc::new(builder.build());
let (output_tx, output_rx) = oneshot::channel();
@@ -667,6 +722,7 @@ mod tests {
&env.access_layer,
waiter,
&manifest_ctx,
+ schema_metadata_manager.clone(),
)
.await
.unwrap();
@@ -686,6 +742,7 @@ mod tests {
&env.access_layer,
waiter,
&manifest_ctx,
+ schema_metadata_manager,
)
.await
.unwrap();
@@ -703,6 +760,19 @@ mod tests {
let mut builder = VersionControlBuilder::new();
let purger = builder.file_purger();
let region_id = builder.region_id();
+ let schema_metadata_manager = Arc::new(SchemaMetadataManager::new(Arc::new(
+ MemoryKvBackend::new(),
+ )
+ as KvBackendRef));
+ schema_metadata_manager
+ .register_region_table_info(
+ builder.region_id().table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
// 5 files to compact.
let end = 1000 * 1000;
@@ -726,6 +796,7 @@ mod tests {
&env.access_layer,
OptionOutputTx::none(),
&manifest_ctx,
+ schema_metadata_manager.clone(),
)
.await
.unwrap();
@@ -755,6 +826,7 @@ mod tests {
&env.access_layer,
OptionOutputTx::none(),
&manifest_ctx,
+ schema_metadata_manager.clone(),
)
.await
.unwrap();
@@ -769,7 +841,7 @@ mod tests {
// On compaction finished and schedule next compaction.
scheduler
- .on_compaction_finished(region_id, &manifest_ctx)
+ .on_compaction_finished(region_id, &manifest_ctx, schema_metadata_manager.clone())
.await;
assert_eq!(1, scheduler.region_status.len());
assert_eq!(2, job_scheduler.num_jobs());
@@ -789,6 +861,7 @@ mod tests {
&env.access_layer,
OptionOutputTx::none(),
&manifest_ctx,
+ schema_metadata_manager,
)
.await
.unwrap();
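The precedence that `find_ttl` implements (a table-level TTL always wins, otherwise the schema-level TTL applies) can be summarized by this simplified, self-contained sketch; the function names are illustrative:

use std::time::Duration;

/// Simplified model of `find_ttl`: prefer the table option, fall back to the database option.
fn effective_ttl(table_ttl: Option<Duration>, schema_ttl: Option<Duration>) -> Option<Duration> {
    table_ttl.or(schema_ttl)
}

fn ttl_precedence_examples() {
    let day = Duration::from_secs(24 * 60 * 60);
    let hour = Duration::from_secs(60 * 60);
    assert_eq!(effective_ttl(Some(hour), Some(day)), Some(hour)); // table option wins
    assert_eq!(effective_ttl(None, Some(day)), Some(day)); // falls back to the database TTL
    assert_eq!(effective_ttl(None, None), None); // no expiration
}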
diff --git a/src/mito2/src/compaction/compactor.rs b/src/mito2/src/compaction/compactor.rs
index 004b2230536d..bf197690cf3d 100644
--- a/src/mito2/src/compaction/compactor.rs
+++ b/src/mito2/src/compaction/compactor.rs
@@ -16,7 +16,8 @@ use std::sync::Arc;
use std::time::Duration;
use api::v1::region::compact_request;
-use common_telemetry::info;
+use common_meta::key::SchemaMetadataManagerRef;
+use common_telemetry::{info, warn};
use object_store::manager::ObjectStoreManagerRef;
use serde::{Deserialize, Serialize};
use smallvec::SmallVec;
@@ -27,7 +28,7 @@ use store_api::storage::RegionId;
use crate::access_layer::{AccessLayer, AccessLayerRef, OperationType, SstWriteRequest};
use crate::cache::{CacheManager, CacheManagerRef};
use crate::compaction::picker::{new_picker, PickerOutput};
-use crate::compaction::CompactionSstReaderBuilder;
+use crate::compaction::{find_ttl, CompactionSstReaderBuilder};
use crate::config::MitoConfig;
use crate::error::{EmptyRegionDirSnafu, JoinSnafu, ObjectStoreNotFoundSnafu, Result};
use crate::manifest::action::{RegionEdit, RegionMetaAction, RegionMetaActionList};
@@ -62,6 +63,7 @@ pub struct CompactionRegion {
pub(crate) manifest_ctx: Arc<ManifestContext>,
pub(crate) current_version: VersionRef,
pub(crate) file_purger: Option<Arc<LocalFilePurger>>,
+ pub(crate) ttl: Option<Duration>,
}
/// OpenCompactionRegionRequest represents the request to open a compaction region.
@@ -78,6 +80,7 @@ pub async fn open_compaction_region(
req: &OpenCompactionRegionRequest,
mito_config: &MitoConfig,
object_store_manager: ObjectStoreManagerRef,
+ schema_metadata_manager: SchemaMetadataManagerRef,
) -> Result<CompactionRegion> {
let object_store = {
let name = &req.region_options.storage;
@@ -169,6 +172,16 @@ pub async fn open_compaction_region(
Arc::new(version)
};
+ let ttl = find_ttl(
+ req.region_id.table_id(),
+ current_version.options.ttl,
+ &schema_metadata_manager,
+ )
+ .await
+ .unwrap_or_else(|e| {
+ warn!(e; "Failed to get ttl for region: {}", region_metadata.region_id);
+ None
+ });
Ok(CompactionRegion {
region_id: req.region_id,
region_options: req.region_options.clone(),
@@ -180,6 +193,7 @@ pub async fn open_compaction_region(
manifest_ctx,
current_version,
file_purger: Some(file_purger),
+ ttl,
})
}
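The hunk above is the core of the database-level TTL change: `find_ttl` prefers the region's own TTL option and only falls back to the database-level value stored in the schema metadata, and a failed lookup degrades to `None` with a warning instead of aborting the open. A minimal standalone sketch of that precedence (the `database_ttl_for_table` helper is hypothetical; the real code consults `SchemaMetadataManager` by table id):

```rust
use std::time::Duration;

/// Hypothetical lookup for database-level options; the real code goes through
/// `SchemaMetadataManager`, keyed by table id.
fn database_ttl_for_table(_table_id: u32) -> Result<Option<Duration>, String> {
    // Pretend the database sets a 7-day TTL.
    Ok(Some(Duration::from_secs(7 * 24 * 3600)))
}

/// Region-level TTL wins; otherwise fall back to the database-level TTL, and
/// treat a lookup failure as "no TTL" so the caller still proceeds.
fn resolve_ttl(table_id: u32, region_ttl: Option<Duration>) -> Option<Duration> {
    if region_ttl.is_some() {
        return region_ttl;
    }
    match database_ttl_for_table(table_id) {
        Ok(ttl) => ttl,
        Err(e) => {
            eprintln!("Failed to get ttl for table {table_id}: {e}");
            None
        }
    }
}

fn main() {
    // A TTL in the region options takes precedence.
    assert_eq!(
        resolve_ttl(1, Some(Duration::from_secs(60))),
        Some(Duration::from_secs(60))
    );
    // Without one, the database-level TTL applies.
    assert_eq!(
        resolve_ttl(1, None),
        Some(Duration::from_secs(7 * 24 * 3600))
    );
}
```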
diff --git a/src/mito2/src/compaction/twcs.rs b/src/mito2/src/compaction/twcs.rs
index c6d2a112aad4..8af1d63eb2fa 100644
--- a/src/mito2/src/compaction/twcs.rs
+++ b/src/mito2/src/compaction/twcs.rs
@@ -212,8 +212,9 @@ impl Picker for TwcsPicker {
fn pick(&self, compaction_region: &CompactionRegion) -> Option<PickerOutput> {
let region_id = compaction_region.region_id;
let levels = compaction_region.current_version.ssts.levels();
- let ttl = compaction_region.current_version.options.ttl;
- let expired_ssts = get_expired_ssts(levels, ttl, Timestamp::current_millis());
+
+ let expired_ssts =
+ get_expired_ssts(levels, compaction_region.ttl, Timestamp::current_millis());
if !expired_ssts.is_empty() {
info!("Expired SSTs in region {}: {:?}", region_id, expired_ssts);
// here we mark expired SSTs as compacting to avoid them being picked.
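`get_expired_ssts` now receives the resolved TTL rather than only the region option. Its body is not shown in this diff, so the following is only a sketch of the usual rule, under the assumption that a file expires when even its newest timestamp is older than `now - ttl`:

```rust
use std::time::Duration;

/// Simplified stand-in for an SST file's time-range metadata.
struct FileMeta {
    file_id: u64,
    max_timestamp_ms: i64,
}

/// A file counts as expired when even its newest row is older than `now - ttl`.
/// This mirrors the intent of `get_expired_ssts`, though the real function
/// works on `FileHandle`s grouped by level.
fn expired_files(files: &[FileMeta], ttl: Option<Duration>, now_ms: i64) -> Vec<u64> {
    let Some(ttl) = ttl else {
        return Vec::new(); // No TTL configured: nothing expires.
    };
    let cutoff = now_ms - ttl.as_millis() as i64;
    files
        .iter()
        .filter(|f| f.max_timestamp_ms < cutoff)
        .map(|f| f.file_id)
        .collect()
}

fn main() {
    let files = vec![
        FileMeta { file_id: 1, max_timestamp_ms: 1_000 },
        FileMeta { file_id: 2, max_timestamp_ms: 990_000 },
    ];
    // With a 60s TTL at t = 1_000_000 ms, only file 1 is fully expired.
    assert_eq!(
        expired_files(&files, Some(Duration::from_secs(60)), 1_000_000),
        vec![1]
    );
}
```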
diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs
index 32b69f620517..bf9777efa5f5 100644
--- a/src/mito2/src/engine.rs
+++ b/src/mito2/src/engine.rs
@@ -66,6 +66,7 @@ use api::region::RegionResponse;
use async_trait::async_trait;
use common_base::Plugins;
use common_error::ext::BoxedError;
+use common_meta::key::SchemaMetadataManagerRef;
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::tracing;
use common_wal::options::{WalOptions, WAL_OPTIONS_KEY};
@@ -112,13 +113,21 @@ impl MitoEngine {
mut config: MitoConfig,
log_store: Arc<S>,
object_store_manager: ObjectStoreManagerRef,
+ schema_metadata_manager: SchemaMetadataManagerRef,
plugins: Plugins,
) -> Result<MitoEngine> {
config.sanitize(data_home)?;
Ok(MitoEngine {
inner: Arc::new(
- EngineInner::new(config, log_store, object_store_manager, plugins).await?,
+ EngineInner::new(
+ config,
+ log_store,
+ object_store_manager,
+ schema_metadata_manager,
+ plugins,
+ )
+ .await?,
),
})
}
@@ -278,13 +287,20 @@ impl EngineInner {
config: MitoConfig,
log_store: Arc<S>,
object_store_manager: ObjectStoreManagerRef,
+ schema_metadata_manager: SchemaMetadataManagerRef,
plugins: Plugins,
) -> Result<EngineInner> {
let config = Arc::new(config);
let wal_raw_entry_reader = Arc::new(LogStoreRawEntryReader::new(log_store.clone()));
Ok(EngineInner {
- workers: WorkerGroup::start(config.clone(), log_store, object_store_manager, plugins)
- .await?,
+ workers: WorkerGroup::start(
+ config.clone(),
+ log_store,
+ object_store_manager,
+ schema_metadata_manager,
+ plugins,
+ )
+ .await?,
config,
wal_raw_entry_reader,
})
@@ -583,6 +599,7 @@ impl RegionEngine for MitoEngine {
// Tests methods.
#[cfg(any(test, feature = "test"))]
+#[allow(clippy::too_many_arguments)]
impl MitoEngine {
/// Returns a new [MitoEngine] for tests.
pub async fn new_for_test<S: LogStore>(
@@ -593,6 +610,7 @@ impl MitoEngine {
write_buffer_manager: Option<crate::flush::WriteBufferManagerRef>,
listener: Option<crate::engine::listener::EventListenerRef>,
time_provider: crate::time_provider::TimeProviderRef,
+ schema_metadata_manager: SchemaMetadataManagerRef,
) -> Result<MitoEngine> {
config.sanitize(data_home)?;
@@ -606,6 +624,7 @@ impl MitoEngine {
object_store_manager,
write_buffer_manager,
listener,
+ schema_metadata_manager,
time_provider,
)
.await?,
diff --git a/src/mito2/src/engine/alter_test.rs b/src/mito2/src/engine/alter_test.rs
index 2e75bf19faa0..8019ef647a86 100644
--- a/src/mito2/src/engine/alter_test.rs
+++ b/src/mito2/src/engine/alter_test.rs
@@ -78,6 +78,16 @@ async fn test_alter_region() {
let region_id = RegionId::new(1, 1);
let request = CreateRequestBuilder::new().build();
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let column_schemas = rows_schema(&request);
let region_dir = request.region_dir.clone();
engine
@@ -167,10 +177,19 @@ fn build_rows_for_tags(
async fn test_put_after_alter() {
let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;
-
let region_id = RegionId::new(1, 1);
let request = CreateRequestBuilder::new().build();
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let mut column_schemas = rows_schema(&request);
let region_dir = request.region_dir.clone();
engine
@@ -266,6 +285,16 @@ async fn test_alter_region_retry() {
let region_id = RegionId::new(1, 1);
let request = CreateRequestBuilder::new().build();
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let column_schemas = rows_schema(&request);
engine
.handle_request(region_id, RegionRequest::Create(request))
@@ -320,6 +349,16 @@ async fn test_alter_on_flushing() {
let region_id = RegionId::new(1, 1);
let request = CreateRequestBuilder::new().build();
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let column_schemas = rows_schema(&request);
engine
.handle_request(region_id, RegionRequest::Create(request))
diff --git a/src/mito2/src/engine/append_mode_test.rs b/src/mito2/src/engine/append_mode_test.rs
index 0fb148be44b0..ab8515aa133c 100644
--- a/src/mito2/src/engine/append_mode_test.rs
+++ b/src/mito2/src/engine/append_mode_test.rs
@@ -98,6 +98,16 @@ async fn test_append_mode_compaction() {
.await;
let region_id = RegionId::new(1, 1);
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let request = CreateRequestBuilder::new()
.insert_option("compaction.type", "twcs")
.insert_option("compaction.twcs.max_active_window_runs", "2")
diff --git a/src/mito2/src/engine/compaction_test.rs b/src/mito2/src/engine/compaction_test.rs
index 9a80d4f84fb7..efa98e6c7240 100644
--- a/src/mito2/src/engine/compaction_test.rs
+++ b/src/mito2/src/engine/compaction_test.rs
@@ -112,6 +112,16 @@ async fn test_compaction_region() {
let engine = env.create_engine(MitoConfig::default()).await;
let region_id = RegionId::new(1, 1);
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let request = CreateRequestBuilder::new()
.insert_option("compaction.type", "twcs")
.insert_option("compaction.twcs.max_active_window_runs", "1")
@@ -171,8 +181,18 @@ async fn test_compaction_region_with_overlapping() {
common_telemetry::init_default_ut_logging();
let mut env = TestEnv::new();
let engine = env.create_engine(MitoConfig::default()).await;
-
let region_id = RegionId::new(1, 1);
+
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let request = CreateRequestBuilder::new()
.insert_option("compaction.type", "twcs")
.insert_option("compaction.twcs.max_active_window_runs", "2")
@@ -217,6 +237,17 @@ async fn test_compaction_region_with_overlapping_delete_all() {
let engine = env.create_engine(MitoConfig::default()).await;
let region_id = RegionId::new(1, 1);
+
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let request = CreateRequestBuilder::new()
.insert_option("compaction.type", "twcs")
.insert_option("compaction.twcs.max_active_window_runs", "2")
@@ -281,6 +312,16 @@ async fn test_readonly_during_compaction() {
.await;
let region_id = RegionId::new(1, 1);
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let request = CreateRequestBuilder::new()
.insert_option("compaction.type", "twcs")
.insert_option("compaction.twcs.max_active_window_runs", "1")
diff --git a/src/mito2/src/engine/drop_test.rs b/src/mito2/src/engine/drop_test.rs
index c4a4790cb62f..7d719f778be9 100644
--- a/src/mito2/src/engine/drop_test.rs
+++ b/src/mito2/src/engine/drop_test.rs
@@ -16,6 +16,7 @@ use std::sync::Arc;
use std::time::Duration;
use api::v1::Rows;
+use common_meta::key::SchemaMetadataManager;
use object_store::util::join_path;
use store_api::region_engine::RegionEngine;
use store_api::region_request::{RegionDropRequest, RegionRequest};
@@ -40,6 +41,17 @@ async fn test_engine_drop_region() {
.await;
let region_id = RegionId::new(1, 1);
+
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
    // It's okay to drop a region that doesn't exist.
engine
.handle_request(region_id, RegionRequest::Drop(RegionDropRequest {}))
@@ -87,7 +99,12 @@ async fn test_engine_drop_region() {
#[tokio::test]
async fn test_engine_drop_region_for_custom_store() {
common_telemetry::init_default_ut_logging();
- async fn setup(engine: &MitoEngine, region_id: RegionId, storage_name: &str) {
+ async fn setup(
+ engine: &MitoEngine,
+ schema_metadata_manager: &SchemaMetadataManager,
+ region_id: RegionId,
+ storage_name: &str,
+ ) {
let request = CreateRequestBuilder::new()
.insert_option("storage", storage_name)
.region_dir(storage_name)
@@ -97,6 +114,18 @@ async fn test_engine_drop_region_for_custom_store() {
.handle_request(region_id, RegionRequest::Create(request))
.await
.unwrap();
+
+ let table_id = format!("test_table_{}", region_id.table_id());
+ schema_metadata_manager
+ .register_region_table_info(
+ region_id.table_id(),
+ &table_id,
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let rows = Rows {
schema: column_schema.clone(),
rows: build_rows_for_key("a", 0, 2, 0),
@@ -114,12 +143,19 @@ async fn test_engine_drop_region_for_custom_store() {
&["Gcs"],
)
.await;
+ let schema_metadata_manager = env.get_schema_metadata_manager();
let object_store_manager = env.get_object_store_manager().unwrap();
let global_region_id = RegionId::new(1, 1);
- setup(&engine, global_region_id, "default").await;
+ setup(
+ &engine,
+ &schema_metadata_manager,
+ global_region_id,
+ "default",
+ )
+ .await;
let custom_region_id = RegionId::new(2, 1);
- setup(&engine, custom_region_id, "Gcs").await;
+ setup(&engine, &schema_metadata_manager, custom_region_id, "Gcs").await;
let global_region = engine.get_region(global_region_id).unwrap();
let global_region_dir = global_region.access_layer.region_dir().to_string();
diff --git a/src/mito2/src/engine/edit_region_test.rs b/src/mito2/src/engine/edit_region_test.rs
index 51f2a976b343..e960504da2d1 100644
--- a/src/mito2/src/engine/edit_region_test.rs
+++ b/src/mito2/src/engine/edit_region_test.rs
@@ -64,6 +64,16 @@ async fn test_edit_region_schedule_compaction() {
.await;
let region_id = RegionId::new(1, 1);
+
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
engine
.handle_request(
region_id,
diff --git a/src/mito2/src/engine/filter_deleted_test.rs b/src/mito2/src/engine/filter_deleted_test.rs
index 167617803e28..0312d3c1db06 100644
--- a/src/mito2/src/engine/filter_deleted_test.rs
+++ b/src/mito2/src/engine/filter_deleted_test.rs
@@ -32,6 +32,16 @@ async fn test_scan_without_filtering_deleted() {
let engine = env.create_engine(MitoConfig::default()).await;
let region_id = RegionId::new(1, 1);
+
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
let request = CreateRequestBuilder::new()
.insert_option("compaction.type", "twcs")
.insert_option("compaction.twcs.max_active_window_runs", "10")
diff --git a/src/mito2/src/engine/flush_test.rs b/src/mito2/src/engine/flush_test.rs
index c134def6aa54..15b8dc9834eb 100644
--- a/src/mito2/src/engine/flush_test.rs
+++ b/src/mito2/src/engine/flush_test.rs
@@ -45,6 +45,16 @@ async fn test_manual_flush() {
let engine = env.create_engine(MitoConfig::default()).await;
let region_id = RegionId::new(1, 1);
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let request = CreateRequestBuilder::new().build();
let column_schemas = rows_schema(&request);
@@ -92,6 +102,16 @@ async fn test_flush_engine() {
.await;
let region_id = RegionId::new(1, 1);
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let request = CreateRequestBuilder::new().build();
let column_schemas = rows_schema(&request);
@@ -151,6 +171,15 @@ async fn test_write_stall() {
.await;
let region_id = RegionId::new(1, 1);
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
let request = CreateRequestBuilder::new().build();
let column_schemas = rows_schema(&request);
@@ -215,6 +244,15 @@ async fn test_flush_empty() {
.await;
let region_id = RegionId::new(1, 1);
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
let request = CreateRequestBuilder::new().build();
engine
@@ -249,8 +287,17 @@ async fn test_flush_reopen_region(factory: Option<LogStoreFactory>) {
let mut env = TestEnv::new().with_log_store_factory(factory.clone());
let engine = env.create_engine(MitoConfig::default()).await;
-
let region_id = RegionId::new(1, 1);
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let topic = prepare_test_for_kafka_log_store(&factory).await;
let request = CreateRequestBuilder::new()
.kafka_topic(topic.clone())
@@ -360,8 +407,17 @@ async fn test_auto_flush_engine() {
time_provider.clone(),
)
.await;
-
let region_id = RegionId::new(1, 1);
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let request = CreateRequestBuilder::new().build();
let column_schemas = rows_schema(&request);
@@ -421,6 +477,16 @@ async fn test_flush_workers() {
let region_id0 = RegionId::new(1, 0);
let region_id1 = RegionId::new(1, 1);
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id0.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let request = CreateRequestBuilder::new().region_dir("r0").build();
let column_schemas = rows_schema(&request);
engine
diff --git a/src/mito2/src/engine/merge_mode_test.rs b/src/mito2/src/engine/merge_mode_test.rs
index 0f0be6b8f12b..08f4d0565007 100644
--- a/src/mito2/src/engine/merge_mode_test.rs
+++ b/src/mito2/src/engine/merge_mode_test.rs
@@ -98,6 +98,16 @@ async fn test_merge_mode_compaction() {
.await;
let region_id = RegionId::new(1, 1);
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let request = CreateRequestBuilder::new()
.field_num(2)
.insert_option("compaction.type", "twcs")
diff --git a/src/mito2/src/engine/open_test.rs b/src/mito2/src/engine/open_test.rs
index c7ad47535c81..8fd084a24ffa 100644
--- a/src/mito2/src/engine/open_test.rs
+++ b/src/mito2/src/engine/open_test.rs
@@ -245,6 +245,16 @@ async fn test_open_region_skip_wal_replay() {
let engine = env.create_engine(MitoConfig::default()).await;
let region_id = RegionId::new(1, 1);
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let request = CreateRequestBuilder::new().build();
let region_dir = request.region_dir.clone();
@@ -423,6 +433,16 @@ async fn test_open_compaction_region() {
let engine = env.create_engine(mito_config.clone()).await;
let region_id = RegionId::new(1, 1);
+ let schema_metadata_manager = env.get_schema_metadata_manager();
+ schema_metadata_manager
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
let request = CreateRequestBuilder::new().build();
let region_dir = request.region_dir.clone();
engine
@@ -444,10 +464,14 @@ async fn test_open_compaction_region() {
region_options: RegionOptions::default(),
};
- let compaction_region =
- open_compaction_region(&req, &mito_config, object_store_manager.clone())
- .await
- .unwrap();
+ let compaction_region = open_compaction_region(
+ &req,
+ &mito_config,
+ object_store_manager.clone(),
+ schema_metadata_manager,
+ )
+ .await
+ .unwrap();
assert_eq!(region_id, compaction_region.region_id);
}
diff --git a/src/mito2/src/engine/parallel_test.rs b/src/mito2/src/engine/parallel_test.rs
index cc5d98291230..53cc0dca8fb0 100644
--- a/src/mito2/src/engine/parallel_test.rs
+++ b/src/mito2/src/engine/parallel_test.rs
@@ -76,6 +76,16 @@ async fn test_parallel_scan() {
let engine = env.create_engine(MitoConfig::default()).await;
let region_id = RegionId::new(1, 1);
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let request = CreateRequestBuilder::new().build();
let region_dir = request.region_dir.clone();
diff --git a/src/mito2/src/engine/prune_test.rs b/src/mito2/src/engine/prune_test.rs
index b7f9056e5857..d151684d460c 100644
--- a/src/mito2/src/engine/prune_test.rs
+++ b/src/mito2/src/engine/prune_test.rs
@@ -151,6 +151,17 @@ async fn test_prune_memtable() {
let engine = env.create_engine(MitoConfig::default()).await;
let region_id = RegionId::new(1, 1);
+
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let request = CreateRequestBuilder::new().build();
let column_schemas = rows_schema(&request);
diff --git a/src/mito2/src/engine/row_selector_test.rs b/src/mito2/src/engine/row_selector_test.rs
index 001d0f2f6ab8..b94887810446 100644
--- a/src/mito2/src/engine/row_selector_test.rs
+++ b/src/mito2/src/engine/row_selector_test.rs
@@ -29,6 +29,15 @@ async fn test_last_row(append_mode: bool) {
let engine = env.create_engine(MitoConfig::default()).await;
let region_id = RegionId::new(1, 1);
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
let request = CreateRequestBuilder::new()
.insert_option("append_mode", &append_mode.to_string())
.build();
diff --git a/src/mito2/src/engine/truncate_test.rs b/src/mito2/src/engine/truncate_test.rs
index 91c08f3b3e78..a61fff086e18 100644
--- a/src/mito2/src/engine/truncate_test.rs
+++ b/src/mito2/src/engine/truncate_test.rs
@@ -151,6 +151,17 @@ async fn test_engine_truncate_after_flush() {
// Create the region.
let region_id = RegionId::new(1, 1);
+
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ )
+ .await;
+
let request = CreateRequestBuilder::new().build();
let column_schemas = rows_schema(&request);
engine
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index 8aa799cbb913..6cb4f8abdd7a 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -870,6 +870,13 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+
+ #[snafu(display("Failed to get schema metadata"))]
+ GetSchemaMetadata {
+ source: common_meta::error::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -1002,6 +1009,7 @@ impl ErrorExt for Error {
| ApplyFulltextIndex { source, .. } => source.status_code(),
DecodeStats { .. } | StatsNotPresent { .. } => StatusCode::Internal,
RegionBusy { .. } => StatusCode::RegionBusy,
+ GetSchemaMetadata { source, .. } => source.status_code(),
}
}
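The new `GetSchemaMetadata` variant follows the usual snafu pattern: callers wrap the metadata lookup with the derived context selector, and the wrapped error's status code is forwarded by `ErrorExt`. A self-contained sketch using the snafu crate, with `std::io::Error` standing in for `common_meta::error::Error`:

```rust
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to get schema metadata"))]
    GetSchemaMetadata { source: std::io::Error },
}

/// Stand-in for the metadata lookup; the real source type is
/// `common_meta::error::Error`.
fn fetch_schema_metadata() -> Result<String, std::io::Error> {
    Err(std::io::Error::new(
        std::io::ErrorKind::NotFound,
        "schema key missing",
    ))
}

fn load_database_options() -> Result<String, Error> {
    // `GetSchemaMetadataSnafu` is the context selector snafu derives for the variant.
    fetch_schema_metadata().context(GetSchemaMetadataSnafu)
}

fn main() {
    assert!(load_database_options().is_err());
}
```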
diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs
index 0bd85747c0f1..ac3cff1578de 100644
--- a/src/mito2/src/test_util.rs
+++ b/src/mito2/src/test_util.rs
@@ -35,6 +35,9 @@ use api::v1::{OpType, Row, Rows, SemanticType};
use common_base::readable_size::ReadableSize;
use common_base::Plugins;
use common_datasource::compression::CompressionType;
+use common_meta::key::{SchemaMetadataManager, SchemaMetadataManagerRef};
+use common_meta::kv_backend::memory::MemoryKvBackend;
+use common_meta::kv_backend::KvBackendRef;
use common_telemetry::warn;
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use common_wal::options::{KafkaWalOptions, WalOptions, WAL_OPTIONS_KEY};
@@ -195,6 +198,7 @@ pub struct TestEnv {
log_store: Option<LogStoreImpl>,
log_store_factory: LogStoreFactory,
object_store_manager: Option<ObjectStoreManagerRef>,
+ schema_metadata_manager: SchemaMetadataManagerRef,
}
impl Default for TestEnv {
@@ -211,6 +215,10 @@ impl TestEnv {
log_store: None,
log_store_factory: LogStoreFactory::RaftEngine(RaftEngineLogStoreFactory),
object_store_manager: None,
+ schema_metadata_manager: Arc::new(SchemaMetadataManager::new(Arc::new(
+ MemoryKvBackend::new(),
+ )
+ as KvBackendRef)),
}
}
@@ -221,6 +229,10 @@ impl TestEnv {
log_store: None,
log_store_factory: LogStoreFactory::RaftEngine(RaftEngineLogStoreFactory),
object_store_manager: None,
+ schema_metadata_manager: Arc::new(SchemaMetadataManager::new(Arc::new(
+ MemoryKvBackend::new(),
+ )
+ as KvBackendRef)),
}
}
@@ -231,6 +243,10 @@ impl TestEnv {
log_store: None,
log_store_factory: LogStoreFactory::RaftEngine(RaftEngineLogStoreFactory),
object_store_manager: None,
+ schema_metadata_manager: Arc::new(SchemaMetadataManager::new(Arc::new(
+ MemoryKvBackend::new(),
+ )
+ as KvBackendRef)),
}
}
@@ -269,6 +285,7 @@ impl TestEnv {
config,
log_store,
object_store_manager,
+ self.schema_metadata_manager.clone(),
Plugins::new(),
)
.await
@@ -278,6 +295,7 @@ impl TestEnv {
config,
log_store,
object_store_manager,
+ self.schema_metadata_manager.clone(),
Plugins::new(),
)
.await
@@ -295,6 +313,7 @@ impl TestEnv {
config,
log_store,
object_store_manager,
+ self.schema_metadata_manager.clone(),
Plugins::new(),
)
.await
@@ -304,6 +323,7 @@ impl TestEnv {
config,
log_store,
object_store_manager,
+ self.schema_metadata_manager.clone(),
Plugins::new(),
)
.await
@@ -335,6 +355,7 @@ impl TestEnv {
manager,
listener,
Arc::new(StdTimeProvider),
+ self.schema_metadata_manager.clone(),
)
.await
.unwrap(),
@@ -346,6 +367,7 @@ impl TestEnv {
manager,
listener,
Arc::new(StdTimeProvider),
+ self.schema_metadata_manager.clone(),
)
.await
.unwrap(),
@@ -388,6 +410,7 @@ impl TestEnv {
manager,
listener,
Arc::new(StdTimeProvider),
+ self.schema_metadata_manager.clone(),
)
.await
.unwrap(),
@@ -399,6 +422,7 @@ impl TestEnv {
manager,
listener,
Arc::new(StdTimeProvider),
+ self.schema_metadata_manager.clone(),
)
.await
.unwrap(),
@@ -430,6 +454,7 @@ impl TestEnv {
manager,
listener,
time_provider.clone(),
+ self.schema_metadata_manager.clone(),
)
.await
.unwrap(),
@@ -441,6 +466,7 @@ impl TestEnv {
manager,
listener,
time_provider.clone(),
+ self.schema_metadata_manager.clone(),
)
.await
.unwrap(),
@@ -450,13 +476,13 @@ impl TestEnv {
/// Reopen the engine.
pub async fn reopen_engine(&mut self, engine: MitoEngine, config: MitoConfig) -> MitoEngine {
engine.stop().await.unwrap();
-
match self.log_store.as_ref().unwrap().clone() {
LogStoreImpl::RaftEngine(log_store) => MitoEngine::new(
&self.data_home().display().to_string(),
config,
log_store,
self.object_store_manager.clone().unwrap(),
+ self.schema_metadata_manager.clone(),
Plugins::new(),
)
.await
@@ -466,6 +492,7 @@ impl TestEnv {
config,
log_store,
self.object_store_manager.clone().unwrap(),
+ self.schema_metadata_manager.clone(),
Plugins::new(),
)
.await
@@ -481,6 +508,7 @@ impl TestEnv {
config,
log_store,
self.object_store_manager.clone().unwrap(),
+ self.schema_metadata_manager.clone(),
Plugins::new(),
)
.await
@@ -490,6 +518,7 @@ impl TestEnv {
config,
log_store,
self.object_store_manager.clone().unwrap(),
+ self.schema_metadata_manager.clone(),
Plugins::new(),
)
.await
@@ -515,6 +544,7 @@ impl TestEnv {
Arc::new(config),
log_store,
Arc::new(object_store_manager),
+ self.schema_metadata_manager.clone(),
Plugins::new(),
)
.await
@@ -523,6 +553,7 @@ impl TestEnv {
Arc::new(config),
log_store,
Arc::new(object_store_manager),
+ self.schema_metadata_manager.clone(),
Plugins::new(),
)
.await
@@ -630,6 +661,10 @@ impl TestEnv {
Arc::new(write_cache)
}
+
+ pub fn get_schema_metadata_manager(&self) -> SchemaMetadataManagerRef {
+ self.schema_metadata_manager.clone()
+ }
}
/// Builder to mock a [RegionCreateRequest].
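Because compaction now resolves database-level options by table id, every engine test that triggers flush or compaction has to seed the in-memory `SchemaMetadataManager`, which is why the `register_region_table_info(...)` boilerplate recurs throughout the tests above. A toy model of why the registration matters (types and names here are illustrative, not the real `common_meta` API):

```rust
use std::collections::HashMap;
use std::time::Duration;

/// Toy stand-in for the metadata the tests seed: table id -> (catalog, schema,
/// optional database-level TTL). Not the real `common_meta` types.
#[derive(Default)]
struct ToySchemaMetadata {
    tables: HashMap<u32, (String, String, Option<Duration>)>,
}

impl ToySchemaMetadata {
    /// Analogous to `register_region_table_info(table_id, table, catalog, schema, opts)`.
    fn register(&mut self, table_id: u32, catalog: &str, schema: &str, ttl: Option<Duration>) {
        self.tables
            .insert(table_id, (catalog.to_string(), schema.to_string(), ttl));
    }

    /// What a compaction-time lookup would see; `None` if nothing was registered.
    fn database_ttl(&self, table_id: u32) -> Option<Duration> {
        self.tables.get(&table_id).and_then(|(_, _, ttl)| *ttl)
    }
}

fn main() {
    let mut meta = ToySchemaMetadata::default();
    // Mirrors the `register_region_table_info(.., "test_catalog", "test_schema", None)`
    // calls repeated throughout the engine tests.
    meta.register(1, "test_catalog", "test_schema", None);
    assert_eq!(meta.database_ttl(1), None); // registered, but no database TTL set
    assert_eq!(meta.database_ttl(2), None); // unregistered table id: nothing found
}
```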
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index eb2cea19d2ad..7ebb12963bc9 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -31,6 +31,7 @@ use std::sync::Arc;
use std::time::Duration;
use common_base::Plugins;
+use common_meta::key::SchemaMetadataManagerRef;
use common_runtime::JoinHandle;
use common_telemetry::{error, info, warn};
use futures::future::try_join_all;
@@ -132,6 +133,7 @@ impl WorkerGroup {
config: Arc<MitoConfig>,
log_store: Arc<S>,
object_store_manager: ObjectStoreManagerRef,
+ schema_metadata_manager: SchemaMetadataManagerRef,
plugins: Plugins,
) -> Result<WorkerGroup> {
let (flush_sender, flush_receiver) = watch::channel(());
@@ -191,6 +193,7 @@ impl WorkerGroup {
flush_sender: flush_sender.clone(),
flush_receiver: flush_receiver.clone(),
plugins: plugins.clone(),
+ schema_metadata_manager: schema_metadata_manager.clone(),
}
.start()
})
@@ -273,6 +276,7 @@ impl WorkerGroup {
object_store_manager: ObjectStoreManagerRef,
write_buffer_manager: Option<WriteBufferManagerRef>,
listener: Option<crate::engine::listener::EventListenerRef>,
+ schema_metadata_manager: SchemaMetadataManagerRef,
time_provider: TimeProviderRef,
) -> Result<WorkerGroup> {
let (flush_sender, flush_receiver) = watch::channel(());
@@ -329,6 +333,7 @@ impl WorkerGroup {
flush_sender: flush_sender.clone(),
flush_receiver: flush_receiver.clone(),
plugins: Plugins::new(),
+ schema_metadata_manager: schema_metadata_manager.clone(),
}
.start()
})
@@ -405,6 +410,7 @@ struct WorkerStarter<S> {
/// Watch channel receiver to wait for background flush job.
flush_receiver: watch::Receiver<()>,
plugins: Plugins,
+ schema_metadata_manager: SchemaMetadataManagerRef,
}
impl<S: LogStore> WorkerStarter<S> {
@@ -455,6 +461,7 @@ impl<S: LogStore> WorkerStarter<S> {
stalled_count: WRITE_STALL_TOTAL.with_label_values(&[&id_string]),
region_count: REGION_COUNT.with_label_values(&[&id_string]),
region_edit_queues: RegionEditQueues::default(),
+ schema_metadata_manager: self.schema_metadata_manager,
};
let handle = common_runtime::spawn_global(async move {
worker_thread.run().await;
@@ -645,6 +652,8 @@ struct RegionWorkerLoop<S> {
region_count: IntGauge,
/// Queues for region edit requests.
region_edit_queues: RegionEditQueues,
+ /// Database level metadata manager.
+ schema_metadata_manager: SchemaMetadataManagerRef,
}
impl<S: LogStore> RegionWorkerLoop<S> {
diff --git a/src/mito2/src/worker/handle_compaction.rs b/src/mito2/src/worker/handle_compaction.rs
index e0889320350c..292eb237357b 100644
--- a/src/mito2/src/worker/handle_compaction.rs
+++ b/src/mito2/src/worker/handle_compaction.rs
@@ -44,6 +44,7 @@ impl<S> RegionWorkerLoop<S> {
®ion.access_layer,
sender,
®ion.manifest_ctx,
+ self.schema_metadata_manager.clone(),
)
.await
{
@@ -80,7 +81,11 @@ impl<S> RegionWorkerLoop<S> {
// Schedule next compaction if necessary.
self.compaction_scheduler
- .on_compaction_finished(region_id, ®ion.manifest_ctx)
+ .on_compaction_finished(
+ region_id,
+ ®ion.manifest_ctx,
+ self.schema_metadata_manager.clone(),
+ )
.await;
}
@@ -107,6 +112,7 @@ impl<S> RegionWorkerLoop<S> {
®ion.access_layer,
OptionOutputTx::none(),
®ion.manifest_ctx,
+ self.schema_metadata_manager.clone(),
)
.await
{
diff --git a/src/operator/src/insert.rs b/src/operator/src/insert.rs
index c2fc69d7db81..cae919f770f3 100644
--- a/src/operator/src/insert.rs
+++ b/src/operator/src/insert.rs
@@ -760,10 +760,8 @@ impl Inserter {
ctx: &QueryContextRef,
statement_executor: &StatementExecutor,
) -> Result<Vec<TableRef>> {
- let catalog_name = ctx.current_catalog();
- let schema_name = ctx.current_schema();
let res = statement_executor
- .create_logical_tables(catalog_name, &schema_name, &create_table_exprs, ctx.clone())
+ .create_logical_tables(&create_table_exprs, ctx.clone())
.await;
match res {
diff --git a/src/operator/src/statement/ddl.rs b/src/operator/src/statement/ddl.rs
index cf10a45652b0..6059cb3ebe78 100644
--- a/src/operator/src/statement/ddl.rs
+++ b/src/operator/src/statement/ddl.rs
@@ -26,7 +26,7 @@ use common_error::ext::BoxedError;
use common_meta::cache_invalidator::Context;
use common_meta::ddl::ExecutorContext;
use common_meta::instruction::CacheIdent;
-use common_meta::key::schema_name::{SchemaNameKey, SchemaNameValue};
+use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::NAME_PATTERN;
use common_meta::rpc::ddl::{
CreateFlowTask, DdlTask, DropFlowTask, DropViewTask, SubmitDdlTaskRequest,
@@ -116,9 +116,21 @@ impl StatementExecutor {
.await
.context(error::FindTablePartitionRuleSnafu { table_name: table })?;
+ // CREATE TABLE LIKE also inherits database level options.
+ let schema_options = self
+ .table_metadata_manager
+ .schema_manager()
+ .get(SchemaNameKey {
+ catalog: &catalog,
+ schema: &schema,
+ })
+ .await
+ .context(TableMetadataManagerSnafu)?;
+
let quote_style = ctx.quote_style();
- let mut create_stmt = create_table_stmt(&table_ref.table_info(), quote_style)
- .context(error::ParseQuerySnafu)?;
+ let mut create_stmt =
+ create_table_stmt(&table_ref.table_info(), schema_options, quote_style)
+ .context(error::ParseQuerySnafu)?;
create_stmt.name = stmt.table_name;
create_stmt.if_not_exists = false;
@@ -165,15 +177,8 @@ impl StatementExecutor {
.table_options
.contains_key(LOGICAL_TABLE_METADATA_KEY)
{
- let catalog_name = &create_table.catalog_name;
- let schema_name = &create_table.schema_name;
return self
- .create_logical_tables(
- catalog_name,
- schema_name,
- &[create_table.clone()],
- query_ctx,
- )
+ .create_logical_tables(&[create_table.clone()], query_ctx)
.await?
.into_iter()
.next()
@@ -183,6 +188,7 @@ impl StatementExecutor {
}
let _timer = crate::metrics::DIST_CREATE_TABLE.start_timer();
+
let schema = self
.table_metadata_manager
.schema_manager()
@@ -193,12 +199,12 @@ impl StatementExecutor {
.await
.context(TableMetadataManagerSnafu)?;
- let Some(schema_opts) = schema else {
- return SchemaNotFoundSnafu {
+ ensure!(
+ schema.is_some(),
+ SchemaNotFoundSnafu {
schema_info: &create_table.schema_name,
}
- .fail();
- };
+ );
// if table exists.
if let Some(table) = self
@@ -240,7 +246,7 @@ impl StatementExecutor {
);
let (partitions, partition_cols) = parse_partitions(create_table, partitions, &query_ctx)?;
- let mut table_info = create_table_info(create_table, partition_cols, schema_opts)?;
+ let mut table_info = create_table_info(create_table, partition_cols)?;
let resp = self
.create_table_procedure(
@@ -273,8 +279,6 @@ impl StatementExecutor {
#[tracing::instrument(skip_all)]
pub async fn create_logical_tables(
&self,
- catalog_name: &str,
- schema_name: &str,
create_table_exprs: &[CreateTableExpr],
query_context: QueryContextRef,
) -> Result<Vec<TableRef>> {
@@ -296,19 +300,9 @@ impl StatementExecutor {
);
}
- let schema = self
- .table_metadata_manager
- .schema_manager()
- .get(SchemaNameKey::new(catalog_name, schema_name))
- .await
- .context(TableMetadataManagerSnafu)?
- .context(SchemaNotFoundSnafu {
- schema_info: schema_name,
- })?;
-
let mut raw_tables_info = create_table_exprs
.iter()
- .map(|create| create_table_info(create, vec![], schema.clone()))
+ .map(|create| create_table_info(create, vec![]))
.collect::<Result<Vec<_>>>()?;
let tables_data = create_table_exprs
.iter()
@@ -1261,7 +1255,6 @@ fn parse_partitions(
fn create_table_info(
create_table: &CreateTableExpr,
partition_columns: Vec<String>,
- schema_opts: SchemaNameValue,
) -> Result<RawTableInfo> {
let mut column_schemas = Vec::with_capacity(create_table.column_defs.len());
let mut column_name_to_index_map = HashMap::new();
@@ -1310,7 +1303,6 @@ fn create_table_info(
let table_options = TableOptions::try_from_iter(&create_table.table_options)
.context(UnrecognizedTableOptionSnafu)?;
- let table_options = merge_options(table_options, schema_opts);
let meta = RawTableMeta {
schema: raw_schema,
@@ -1495,12 +1487,6 @@ fn convert_value(
.context(ParseSqlValueSnafu)
}
-/// Merge table level table options with schema level table options.
-fn merge_options(mut table_opts: TableOptions, schema_opts: SchemaNameValue) -> TableOptions {
- table_opts.ttl = table_opts.ttl.or(schema_opts.ttl);
- table_opts
-}
-
#[cfg(test)]
mod test {
use session::context::{QueryContext, QueryContextBuilder};
diff --git a/src/operator/src/statement/show.rs b/src/operator/src/statement/show.rs
index eb69983f01c1..111a70ab194d 100644
--- a/src/operator/src/statement/show.rs
+++ b/src/operator/src/statement/show.rs
@@ -13,6 +13,7 @@
// limitations under the License.
use common_error::ext::BoxedError;
+use common_meta::key::schema_name::SchemaNameKey;
use common_query::Output;
use common_telemetry::tracing;
use partition::manager::PartitionInfo;
@@ -33,7 +34,7 @@ use table::TableRef;
use crate::error::{
self, CatalogSnafu, ExecuteStatementSnafu, ExternalSnafu, FindViewInfoSnafu, InvalidSqlSnafu,
- Result, ViewInfoNotFoundSnafu, ViewNotFoundSnafu,
+ Result, TableMetadataManagerSnafu, ViewInfoNotFoundSnafu, ViewNotFoundSnafu,
};
use crate::statement::StatementExecutor;
@@ -118,6 +119,16 @@ impl StatementExecutor {
.fail();
}
+ let schema_options = self
+ .table_metadata_manager
+ .schema_manager()
+ .get(SchemaNameKey {
+ catalog: &table_name.catalog_name,
+ schema: &table_name.schema_name,
+ })
+ .await
+ .context(TableMetadataManagerSnafu)?;
+
let partitions = self
.partition_manager
.find_table_partitions(table.table_info().table_id())
@@ -128,7 +139,8 @@ impl StatementExecutor {
let partitions = create_partitions_stmt(partitions)?;
- query::sql::show_create_table(table, partitions, query_ctx).context(ExecuteStatementSnafu)
+ query::sql::show_create_table(table, schema_options, partitions, query_ctx)
+ .context(ExecuteStatementSnafu)
}
#[tracing::instrument(skip_all)]
diff --git a/src/query/src/sql.rs b/src/query/src/sql.rs
index ca79ef7416cc..5679cd5dc43d 100644
--- a/src/query/src/sql.rs
+++ b/src/query/src/sql.rs
@@ -32,6 +32,7 @@ use common_datasource::lister::{Lister, Source};
use common_datasource::object_store::build_backend;
use common_datasource::util::find_dir_and_filename;
use common_meta::key::flow::flow_info::FlowInfoValue;
+use common_meta::SchemaOptions;
use common_query::prelude::GREPTIME_TIMESTAMP;
use common_query::Output;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
@@ -703,6 +704,7 @@ pub fn show_create_database(database_name: &str, options: OptionMap) -> Result<O
pub fn show_create_table(
table: TableRef,
+ schema_options: Option<SchemaOptions>,
partitions: Option<Partitions>,
query_ctx: QueryContextRef,
) -> Result<Output> {
@@ -711,7 +713,7 @@ pub fn show_create_table(
let quote_style = query_ctx.quote_style();
- let mut stmt = create_table_stmt(&table_info, quote_style)?;
+ let mut stmt = create_table_stmt(&table_info, schema_options, quote_style)?;
stmt.partitions = partitions.map(|mut p| {
p.set_quote(quote_style);
p
diff --git a/src/query/src/sql/show_create_table.rs b/src/query/src/sql/show_create_table.rs
index 2c560bd36013..5e6accc4e458 100644
--- a/src/query/src/sql/show_create_table.rs
+++ b/src/query/src/sql/show_create_table.rs
@@ -16,6 +16,7 @@
use std::collections::HashMap;
+use common_meta::SchemaOptions;
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, SchemaRef, COMMENT_KEY};
use humantime::format_duration;
use snafu::ResultExt;
@@ -36,7 +37,8 @@ use crate::error::{
ConvertSqlTypeSnafu, ConvertSqlValueSnafu, GetFulltextOptionsSnafu, Result, SqlSnafu,
};
-fn create_sql_options(table_meta: &TableMeta) -> OptionMap {
+/// Generates CREATE TABLE options from the given table metadata and schema-level options.
+fn create_sql_options(table_meta: &TableMeta, schema_options: Option<SchemaOptions>) -> OptionMap {
let table_opts = &table_meta.options;
let mut options = OptionMap::default();
if let Some(write_buffer_size) = table_opts.write_buffer_size {
@@ -47,7 +49,12 @@ fn create_sql_options(table_meta: &TableMeta) -> OptionMap {
}
if let Some(ttl) = table_opts.ttl {
options.insert(TTL_KEY.to_string(), format_duration(ttl).to_string());
- }
+ } else if let Some(database_ttl) = schema_options.and_then(|o| o.ttl) {
+ options.insert(
+ TTL_KEY.to_string(),
+ format_duration(database_ttl).to_string(),
+ );
+ };
for (k, v) in table_opts
.extra_options
.iter()
@@ -169,7 +176,11 @@ fn create_table_constraints(
}
/// Create a CreateTable statement from table info.
-pub fn create_table_stmt(table_info: &TableInfoRef, quote_style: char) -> Result<CreateTable> {
+pub fn create_table_stmt(
+ table_info: &TableInfoRef,
+ schema_options: Option<SchemaOptions>,
+ quote_style: char,
+) -> Result<CreateTable> {
let table_meta = &table_info.meta;
let table_name = &table_info.name;
let schema = &table_info.meta.schema;
@@ -195,7 +206,7 @@ pub fn create_table_stmt(table_info: &TableInfoRef, quote_style: char) -> Result
columns,
engine: table_meta.engine.clone(),
constraints,
- options: create_sql_options(table_meta),
+ options: create_sql_options(table_meta, schema_options),
partitions: None,
})
}
@@ -271,7 +282,7 @@ mod tests {
.unwrap(),
);
- let stmt = create_table_stmt(&info, '"').unwrap();
+ let stmt = create_table_stmt(&info, None, '"').unwrap();
let sql = format!("\n{}", stmt);
assert_eq!(
@@ -337,7 +348,7 @@ ENGINE=mito
.unwrap(),
);
- let stmt = create_table_stmt(&info, '"').unwrap();
+ let stmt = create_table_stmt(&info, None, '"').unwrap();
let sql = format!("\n{}", stmt);
assert_eq!(
| fix | database base ttl (#4926) |
| 665b7e5c6edf074c6db7aa2e28bd8fed2e083bd0 | 2024-08-09 13:47:54 | Weny Xu | perf: merge small byte ranges for optimized fetching (#4520) | false |
diff --git a/src/mito2/src/sst/parquet/helper.rs b/src/mito2/src/sst/parquet/helper.rs
index b3cc8f8279d3..e80f751af982 100644
--- a/src/mito2/src/sst/parquet/helper.rs
+++ b/src/mito2/src/sst/parquet/helper.rs
@@ -16,7 +16,7 @@ use std::ops::Range;
use std::sync::Arc;
use bytes::Bytes;
-use object_store::{ErrorKind, ObjectStore};
+use object_store::ObjectStore;
use parquet::basic::ColumnOrder;
use parquet::file::metadata::{FileMetaData, ParquetMetaData, RowGroupMetaData};
use parquet::format;
@@ -88,84 +88,26 @@ fn parse_column_orders(
}
}
-/// Fetches data from object store.
-/// If the object store supports blocking, use sequence blocking read.
-/// Otherwise, use concurrent read.
-pub async fn fetch_byte_ranges(
- file_path: &str,
- object_store: ObjectStore,
- ranges: &[Range<u64>],
-) -> object_store::Result<Vec<Bytes>> {
- if object_store.info().full_capability().blocking {
- fetch_ranges_seq(file_path, object_store, ranges).await
- } else {
- fetch_ranges_concurrent(file_path, object_store, ranges).await
- }
-}
-
-/// Fetches data from object store sequentially
-async fn fetch_ranges_seq(
- file_path: &str,
- object_store: ObjectStore,
- ranges: &[Range<u64>],
-) -> object_store::Result<Vec<Bytes>> {
- let block_object_store = object_store.blocking();
- let file_path = file_path.to_string();
- let ranges = ranges.to_vec();
-
- let f = move || -> object_store::Result<Vec<Bytes>> {
- ranges
- .into_iter()
- .map(|range| {
- let data = block_object_store
- .read_with(&file_path)
- .range(range.start..range.end)
- .call()?;
- Ok::<_, object_store::Error>(data.to_bytes())
- })
- .collect::<object_store::Result<Vec<_>>>()
- };
-
- maybe_spawn_blocking(f).await
-}
+const FETCH_PARALLELISM: usize = 8;
+const MERGE_GAP: usize = 512 * 1024;
-/// Fetches data from object store concurrently.
-async fn fetch_ranges_concurrent(
+/// Asynchronously fetches byte ranges from an object store.
+///
+/// * `FETCH_PARALLELISM` - The number of concurrent fetch operations.
+/// * `MERGE_GAP` - The maximum gap size (in bytes) to merge small byte ranges for optimized fetching.
+pub async fn fetch_byte_ranges(
file_path: &str,
object_store: ObjectStore,
ranges: &[Range<u64>],
) -> object_store::Result<Vec<Bytes>> {
- // TODO(QuenKar): may merge small ranges to a bigger range to optimize.
- let mut handles = Vec::with_capacity(ranges.len());
- for range in ranges {
- let future_read = object_store.read_with(file_path);
- handles.push(async move {
- let data = future_read.range(range.start..range.end).await?;
- Ok::<_, object_store::Error>(data.to_bytes())
- });
- }
- let results = futures::future::try_join_all(handles).await?;
- Ok(results)
-}
-
-// Port from https://github.com/apache/arrow-rs/blob/802ed428f87051fdca31180430ddb0ecb2f60e8b/object_store/src/util.rs#L74-L83
-/// Takes a function and spawns it to a tokio blocking pool if available
-async fn maybe_spawn_blocking<F, T>(f: F) -> object_store::Result<T>
-where
- F: FnOnce() -> object_store::Result<T> + Send + 'static,
- T: Send + 'static,
-{
- match tokio::runtime::Handle::try_current() {
- Ok(runtime) => runtime
- .spawn_blocking(f)
- .await
- .map_err(new_task_join_error)?,
- Err(_) => f(),
- }
-}
-
-// https://github.com/apache/opendal/blob/v0.46.0/core/src/raw/tokio_util.rs#L21-L24
-/// Parse tokio error into opendal::Error.
-fn new_task_join_error(e: tokio::task::JoinError) -> object_store::Error {
- object_store::Error::new(ErrorKind::Unexpected, "tokio task join failed").set_source(e)
+ Ok(object_store
+ .reader_with(file_path)
+ .concurrent(FETCH_PARALLELISM)
+ .gap(MERGE_GAP)
+ .await?
+ .fetch(ranges.to_vec())
+ .await?
+ .into_iter()
+ .map(|buf| buf.to_bytes())
+ .collect::<Vec<_>>())
}
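The rewritten `fetch_byte_ranges` delegates merging to opendal's reader (`.gap(MERGE_GAP)`, `.concurrent(FETCH_PARALLELISM)`), so the old hand-rolled sequential and concurrent paths disappear. A standalone sketch of the gap-merge idea itself, assuming the usual sort-and-coalesce approach (the real reader also maps the merged buffers back onto the originally requested ranges):

```rust
use std::ops::Range;

const MERGE_GAP: u64 = 512 * 1024;

/// Coalesce byte ranges whose gap is at most `gap` bytes, so nearby reads can
/// be served by one larger request instead of many small ones.
fn merge_ranges(mut ranges: Vec<Range<u64>>, gap: u64) -> Vec<Range<u64>> {
    ranges.sort_by_key(|r| r.start);
    let mut merged: Vec<Range<u64>> = Vec::with_capacity(ranges.len());
    for r in ranges {
        match merged.last_mut() {
            // Close enough to the previous range: extend it.
            Some(last) if r.start <= last.end.saturating_add(gap) => {
                last.end = last.end.max(r.end);
            }
            // Too far away: start a new merged range.
            _ => merged.push(r),
        }
    }
    merged
}

fn main() {
    let ranges = vec![0..100, 200..300, 2_000_000..2_000_100];
    let merged = merge_ranges(ranges, MERGE_GAP);
    // The first two ranges are close enough to merge; the far one stays separate.
    assert_eq!(merged, vec![0..300, 2_000_000..2_000_100]);
}
```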
| perf | merge small byte ranges for optimized fetching (#4520) |
| 266919c226f4da5296c75797169843094b221f4f | 2024-12-18 11:40:59 | Lanqing Yang | fix: display inverted and fulltext index in show index (#5169) | false |
diff --git a/src/catalog/src/system_schema/information_schema/key_column_usage.rs b/src/catalog/src/system_schema/information_schema/key_column_usage.rs
index 56713dabba28..42cfa53fdb38 100644
--- a/src/catalog/src/system_schema/information_schema/key_column_usage.rs
+++ b/src/catalog/src/system_schema/information_schema/key_column_usage.rs
@@ -54,6 +54,10 @@ const INIT_CAPACITY: usize = 42;
pub(crate) const PRI_CONSTRAINT_NAME: &str = "PRIMARY";
/// Time index constraint name
pub(crate) const TIME_INDEX_CONSTRAINT_NAME: &str = "TIME INDEX";
+/// Inverted index constraint name
+pub(crate) const INVERTED_INDEX_CONSTRAINT_NAME: &str = "INVERTED INDEX";
+/// Fulltext index constraint name
+pub(crate) const FULLTEXT_INDEX_CONSTRAINT_NAME: &str = "FULLTEXT INDEX";
/// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
pub(super) struct InformationSchemaKeyColumnUsage {
@@ -216,14 +220,13 @@ impl InformationSchemaKeyColumnUsageBuilder {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
while let Some(table) = stream.try_next().await? {
- let mut primary_constraints = vec![];
-
let table_info = table.table_info();
let table_name = &table_info.name;
let keys = &table_info.meta.primary_key_indices;
let schema = table.schema();
for (idx, column) in schema.column_schemas().iter().enumerate() {
+ let mut constraints = vec![];
if column.is_time_index() {
self.add_key_column_usage(
&predicates,
@@ -236,30 +239,31 @@ impl InformationSchemaKeyColumnUsageBuilder {
1, //always 1 for time index
);
}
+ // TODO(dimbtp): foreign key constraint not supported yet
if keys.contains(&idx) {
- primary_constraints.push((
- catalog_name.clone(),
- schema_name.clone(),
- table_name.to_string(),
- column.name.clone(),
- ));
+ constraints.push(PRI_CONSTRAINT_NAME);
+ }
+ if column.is_inverted_indexed() {
+ constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
+ }
+
+ if column.has_fulltext_index_key() {
+ constraints.push(FULLTEXT_INDEX_CONSTRAINT_NAME);
}
- // TODO(dimbtp): foreign key constraint not supported yet
- }
- for (i, (catalog_name, schema_name, table_name, column_name)) in
- primary_constraints.into_iter().enumerate()
- {
- self.add_key_column_usage(
- &predicates,
- &schema_name,
- PRI_CONSTRAINT_NAME,
- &catalog_name,
- &schema_name,
- &table_name,
- &column_name,
- i as u32 + 1,
- );
+ if !constraints.is_empty() {
+ let aggregated_constraints = constraints.join(", ");
+ self.add_key_column_usage(
+ &predicates,
+ &schema_name,
+ &aggregated_constraints,
+ &catalog_name,
+ &schema_name,
+ table_name,
+ &column.name,
+ idx as u32 + 1,
+ );
+ }
}
}
}
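The reworked `key_column_usage` builder aggregates all constraints of a column into a single comma-joined `constraint_name`, rather than emitting primary-key rows in a second pass. A small sketch of that aggregation, keeping the same ordering as the code above:

```rust
/// Per-column constraint aggregation as done for
/// `information_schema.key_column_usage`: collect every applicable constraint
/// name for a column and join them into one row value.
fn constraint_names(
    is_primary_key: bool,
    is_inverted_indexed: bool,
    has_fulltext_index: bool,
) -> Option<String> {
    let mut constraints = Vec::new();
    if is_primary_key {
        constraints.push("PRIMARY");
    }
    if is_inverted_indexed {
        constraints.push("INVERTED INDEX");
    }
    if has_fulltext_index {
        constraints.push("FULLTEXT INDEX");
    }
    if constraints.is_empty() {
        None
    } else {
        Some(constraints.join(", "))
    }
}

fn main() {
    // A primary-key column that is also inverted- and fulltext-indexed, like
    // `idc` in the updated `show_index` test.
    assert_eq!(
        constraint_names(true, true, true).as_deref(),
        Some("PRIMARY, INVERTED INDEX, FULLTEXT INDEX")
    );
    // A plain field column produces no key_column_usage row.
    assert_eq!(constraint_names(false, false, false), None);
}
```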
diff --git a/src/datatypes/src/schema/column_schema.rs b/src/datatypes/src/schema/column_schema.rs
index aee9efd9625d..7a96ab5e2bf2 100644
--- a/src/datatypes/src/schema/column_schema.rs
+++ b/src/datatypes/src/schema/column_schema.rs
@@ -164,6 +164,10 @@ impl ColumnSchema {
.unwrap_or(false)
}
+ pub fn has_fulltext_index_key(&self) -> bool {
+ self.metadata.contains_key(FULLTEXT_KEY)
+ }
+
pub fn has_inverted_index_key(&self) -> bool {
self.metadata.contains_key(INVERTED_INDEX_KEY)
}
diff --git a/src/query/src/sql.rs b/src/query/src/sql.rs
index 062bd8e14e18..3337503d097c 100644
--- a/src/query/src/sql.rs
+++ b/src/query/src/sql.rs
@@ -40,7 +40,7 @@ use common_recordbatch::RecordBatches;
use common_time::timezone::get_timezone;
use common_time::Timestamp;
use datafusion::common::ScalarValue;
-use datafusion::prelude::SessionContext;
+use datafusion::prelude::{concat_ws, SessionContext};
use datafusion_expr::{case, col, lit, Expr};
use datatypes::prelude::*;
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, RawSchema, Schema};
@@ -400,6 +400,20 @@ pub async fn show_index(
query_ctx.current_schema()
};
+ let fulltext_index_expr = case(col("constraint_name").like(lit("%FULLTEXT INDEX%")))
+ .when(lit(true), lit("greptime-fulltext-index-v1"))
+ .otherwise(null())
+ .context(error::PlanSqlSnafu)?;
+
+ let inverted_index_expr = case(
+ col("constraint_name")
+ .like(lit("%INVERTED INDEX%"))
+ .or(col("constraint_name").like(lit("%PRIMARY%"))),
+ )
+ .when(lit(true), lit("greptime-inverted-index-v1"))
+ .otherwise(null())
+ .context(error::PlanSqlSnafu)?;
+
let select = vec![
// 1 as `Non_unique`: contain duplicates
lit(1).alias(INDEX_NONT_UNIQUE_COLUMN),
@@ -417,8 +431,11 @@ pub async fn show_index(
.otherwise(lit(YES_STR))
.context(error::PlanSqlSnafu)?
.alias(COLUMN_NULLABLE_COLUMN),
- // TODO(dennis): maybe 'BTREE'?
- lit("greptime-inverted-index-v1").alias(INDEX_INDEX_TYPE_COLUMN),
+ concat_ws(
+ lit(", "),
+ vec![inverted_index_expr.clone(), fulltext_index_expr.clone()],
+ )
+ .alias(INDEX_INDEX_TYPE_COLUMN),
lit("").alias(COLUMN_COMMENT_COLUMN),
lit("").alias(INDEX_COMMENT_COLUMN),
lit(YES_STR).alias(INDEX_VISIBLE_COLUMN),
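`Index_type` is now built from two `CASE` expressions joined with `concat_ws`. Since `concat_ws` skips NULL arguments, a column with both index kinds yields both type strings, while a TIME INDEX column (matching neither `LIKE` pattern) ends up with an empty `Index_type`, which matches the updated test expectations below. A sketch of that null-skipping behaviour, assuming standard `concat_ws` semantics:

```rust
/// `concat_ws`-style join: NULL (here `None`) parts are skipped entirely, so
/// a column matching neither CASE expression produces an empty string.
fn concat_ws(sep: &str, parts: &[Option<&str>]) -> String {
    parts
        .iter()
        .filter_map(|p| *p)
        .collect::<Vec<_>>()
        .join(sep)
}

fn main() {
    let inverted = Some("greptime-inverted-index-v1");
    let fulltext = Some("greptime-fulltext-index-v1");
    // Both CASE expressions matched: both type strings appear.
    assert_eq!(
        concat_ws(", ", &[inverted, fulltext]),
        "greptime-inverted-index-v1, greptime-fulltext-index-v1"
    );
    // TIME INDEX matches neither LIKE pattern, so both parts are NULL.
    assert_eq!(concat_ws(", ", &[None, None]), "");
}
```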
diff --git a/tests/cases/standalone/common/show/show_index.result b/tests/cases/standalone/common/show/show_index.result
index 995da87c133d..6f179687dbb5 100644
--- a/tests/cases/standalone/common/show/show_index.result
+++ b/tests/cases/standalone/common/show/show_index.result
@@ -1,11 +1,15 @@
CREATE TABLE IF NOT EXISTS system_metrics (
host STRING,
- idc STRING,
+ idc STRING FULLTEXT,
cpu_util DOUBLE,
memory_util DOUBLE,
disk_util DOUBLE,
+ desc1 STRING,
+ desc2 STRING FULLTEXT,
+ desc3 STRING FULLTEXT,
ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY(host, idc),
+ INVERTED INDEX(idc, desc1, desc2),
TIME INDEX(ts)
);
@@ -33,28 +37,34 @@ SHOW INDEX FROM test;
+-------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+----------------------------+---------+---------------+---------+------------+
| test | 1 | PRIMARY | 1 | a | A | | | | YES | greptime-inverted-index-v1 | | | YES | |
| test | 1 | PRIMARY | 2 | b | A | | | | YES | greptime-inverted-index-v1 | | | YES | |
-| test | 1 | TIME INDEX | 1 | ts | A | | | | NO | greptime-inverted-index-v1 | | | YES | |
+| test | 1 | TIME INDEX | 1 | ts | A | | | | NO | | | | YES | |
+-------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+----------------------------+---------+---------------+---------+------------+
SHOW INDEX FROM system_metrics;
-+----------------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+----------------------------+---------+---------------+---------+------------+
-| Table | Non_unique | Key_name | Seq_in_index | Column_name | Collation | Cardinality | Sub_part | Packed | Null | Index_type | Comment | Index_comment | Visible | Expression |
-+----------------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+----------------------------+---------+---------------+---------+------------+
-| system_metrics | 1 | PRIMARY | 1 | host | A | | | | YES | greptime-inverted-index-v1 | | | YES | |
-| system_metrics | 1 | PRIMARY | 2 | idc | A | | | | YES | greptime-inverted-index-v1 | | | YES | |
-| system_metrics | 1 | TIME INDEX | 1 | ts | A | | | | NO | greptime-inverted-index-v1 | | | YES | |
-+----------------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+----------------------------+---------+---------------+---------+------------+
++----------------+------------+-----------------------------------------+--------------+-------------+-----------+-------------+----------+--------+------+--------------------------------------------------------+---------+---------------+---------+------------+
+| Table | Non_unique | Key_name | Seq_in_index | Column_name | Collation | Cardinality | Sub_part | Packed | Null | Index_type | Comment | Index_comment | Visible | Expression |
++----------------+------------+-----------------------------------------+--------------+-------------+-----------+-------------+----------+--------+------+--------------------------------------------------------+---------+---------------+---------+------------+
+| system_metrics | 1 | INVERTED INDEX | 6 | desc1 | A | | | | YES | greptime-inverted-index-v1 | | | YES | |
+| system_metrics | 1 | INVERTED INDEX, FULLTEXT INDEX | 7 | desc2 | A | | | | YES | greptime-inverted-index-v1, greptime-fulltext-index-v1 | | | YES | |
+| system_metrics | 1 | FULLTEXT INDEX | 8 | desc3 | A | | | | YES | greptime-fulltext-index-v1 | | | YES | |
+| system_metrics | 1 | PRIMARY | 1 | host | A | | | | YES | greptime-inverted-index-v1 | | | YES | |
+| system_metrics | 1 | PRIMARY, INVERTED INDEX, FULLTEXT INDEX | 2 | idc | A | | | | YES | greptime-inverted-index-v1, greptime-fulltext-index-v1 | | | YES | |
+| system_metrics | 1 | TIME INDEX | 1 | ts | A | | | | NO | | | | YES | |
++----------------+------------+-----------------------------------------+--------------+-------------+-----------+-------------+----------+--------+------+--------------------------------------------------------+---------+---------------+---------+------------+
SHOW INDEX FROM system_metrics in public;
-+----------------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+----------------------------+---------+---------------+---------+------------+
-| Table | Non_unique | Key_name | Seq_in_index | Column_name | Collation | Cardinality | Sub_part | Packed | Null | Index_type | Comment | Index_comment | Visible | Expression |
-+----------------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+----------------------------+---------+---------------+---------+------------+
-| system_metrics | 1 | PRIMARY | 1 | host | A | | | | YES | greptime-inverted-index-v1 | | | YES | |
-| system_metrics | 1 | PRIMARY | 2 | idc | A | | | | YES | greptime-inverted-index-v1 | | | YES | |
-| system_metrics | 1 | TIME INDEX | 1 | ts | A | | | | NO | greptime-inverted-index-v1 | | | YES | |
-+----------------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+----------------------------+---------+---------------+---------+------------+
++----------------+------------+-----------------------------------------+--------------+-------------+-----------+-------------+----------+--------+------+--------------------------------------------------------+---------+---------------+---------+------------+
+| Table | Non_unique | Key_name | Seq_in_index | Column_name | Collation | Cardinality | Sub_part | Packed | Null | Index_type | Comment | Index_comment | Visible | Expression |
++----------------+------------+-----------------------------------------+--------------+-------------+-----------+-------------+----------+--------+------+--------------------------------------------------------+---------+---------------+---------+------------+
+| system_metrics | 1 | INVERTED INDEX | 6 | desc1 | A | | | | YES | greptime-inverted-index-v1 | | | YES | |
+| system_metrics | 1 | INVERTED INDEX, FULLTEXT INDEX | 7 | desc2 | A | | | | YES | greptime-inverted-index-v1, greptime-fulltext-index-v1 | | | YES | |
+| system_metrics | 1 | FULLTEXT INDEX | 8 | desc3 | A | | | | YES | greptime-fulltext-index-v1 | | | YES | |
+| system_metrics | 1 | PRIMARY | 1 | host | A | | | | YES | greptime-inverted-index-v1 | | | YES | |
+| system_metrics | 1 | PRIMARY, INVERTED INDEX, FULLTEXT INDEX | 2 | idc | A | | | | YES | greptime-inverted-index-v1, greptime-fulltext-index-v1 | | | YES | |
+| system_metrics | 1 | TIME INDEX | 1 | ts | A | | | | NO | | | | YES | |
++----------------+------------+-----------------------------------------+--------------+-------------+-----------+-------------+----------+--------+------+--------------------------------------------------------+---------+---------------+---------+------------+
SHOW INDEX FROM system_metrics like '%util%';
@@ -62,11 +72,11 @@ Error: 1001(Unsupported), SQL statement is not supported, keyword: like
SHOW INDEX FROM system_metrics WHERE Key_name = 'TIME INDEX';
-+----------------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+----------------------------+---------+---------------+---------+------------+
-| Table | Non_unique | Key_name | Seq_in_index | Column_name | Collation | Cardinality | Sub_part | Packed | Null | Index_type | Comment | Index_comment | Visible | Expression |
-+----------------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+----------------------------+---------+---------------+---------+------------+
-| system_metrics | 1 | TIME INDEX | 1 | ts | A | | | | NO | greptime-inverted-index-v1 | | | YES | |
-+----------------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+----------------------------+---------+---------------+---------+------------+
++----------------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+------------+---------+---------------+---------+------------+
+| Table | Non_unique | Key_name | Seq_in_index | Column_name | Collation | Cardinality | Sub_part | Packed | Null | Index_type | Comment | Index_comment | Visible | Expression |
++----------------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+------------+---------+---------------+---------+------------+
+| system_metrics | 1 | TIME INDEX | 1 | ts | A | | | | NO | | | | YES | |
++----------------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+------------+---------+---------------+---------+------------+
DROP TABLE system_metrics;
diff --git a/tests/cases/standalone/common/show/show_index.sql b/tests/cases/standalone/common/show/show_index.sql
index 3f804db3845f..f0c5894a0ad7 100644
--- a/tests/cases/standalone/common/show/show_index.sql
+++ b/tests/cases/standalone/common/show/show_index.sql
@@ -1,11 +1,15 @@
CREATE TABLE IF NOT EXISTS system_metrics (
host STRING,
- idc STRING,
+ idc STRING FULLTEXT,
cpu_util DOUBLE,
memory_util DOUBLE,
disk_util DOUBLE,
+ desc1 STRING,
+ desc2 STRING FULLTEXT,
+ desc3 STRING FULLTEXT,
ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY(host, idc),
+ INVERTED INDEX(idc, desc1, desc2),
TIME INDEX(ts)
);
|
fix
|
display inverted and fulltext index in show index (#5169)
|
213758709170b7afda4f1eab2e509958579d0c35
|
2022-11-07 14:40:43
|
Lei, Huang
|
feat: datanode heartbeat (#377)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 22e51f0b7356..f21f03eafd88 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1628,6 +1628,8 @@ dependencies = [
"futures",
"hyper",
"log-store",
+ "meta-client",
+ "meta-srv",
"metrics",
"object-store",
"query",
diff --git a/benchmarks/Cargo.toml b/benchmarks/Cargo.toml
index 311f8867ac0b..f5908c9562c7 100644
--- a/benchmarks/Cargo.toml
+++ b/benchmarks/Cargo.toml
@@ -2,14 +2,13 @@
name = "benchmarks"
version = "0.1.0"
edition = "2021"
-
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
arrow = "10"
clap = { version = "4.0", features = ["derive"] }
client = { path = "../src/client" }
-itertools = "0.10.5"
indicatif = "0.17.1"
+itertools = "0.10.5"
parquet = { version = "*" }
tokio = { version = "1.21", features = ["full"] }
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index 866e80666477..18cba573b50b 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -1,3 +1,4 @@
+node_id = 42
http_addr = '0.0.0.0:3000'
rpc_addr = '0.0.0.0:3001'
wal_dir = '/tmp/greptimedb/wal'
@@ -13,3 +14,9 @@ postgres_runtime_size = 4
[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'
+
+[meta_client_opts]
+metasrv_addr = "1.1.1.1:3002"
+timeout_millis = 3000
+connect_timeout_millis = 5000
+tcp_nodelay = true
diff --git a/src/catalog/src/error.rs b/src/catalog/src/error.rs
index 4263c94de027..99e81a5767db 100644
--- a/src/catalog/src/error.rs
+++ b/src/catalog/src/error.rs
@@ -4,6 +4,7 @@ use common_error::ext::{BoxedError, ErrorExt};
use common_error::prelude::{Snafu, StatusCode};
use datafusion::error::DataFusionError;
use datatypes::arrow;
+use datatypes::schema::RawSchema;
use snafu::{Backtrace, ErrorCompat};
#[derive(Debug, Snafu)]
@@ -110,6 +111,19 @@ pub enum Error {
source: table::error::Error,
},
+ #[snafu(display(
+ "Invalid table schema in catalog entry, table:{}, schema: {:?}, source: {}",
+ table_info,
+ schema,
+ source
+ ))]
+ InvalidTableSchema {
+ table_info: String,
+ schema: RawSchema,
+ #[snafu(backtrace)]
+ source: datatypes::error::Error,
+ },
+
#[snafu(display("Failed to execute system catalog table scan, source: {}", source))]
SystemCatalogTableScanExec {
#[snafu(backtrace)]
@@ -170,6 +184,7 @@ impl ErrorExt for Error {
Error::MetaSrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source } => source.status_code(),
Error::SystemCatalogTableScanExec { source } => source.status_code(),
+ Error::InvalidTableSchema { source, .. } => source.status_code(),
}
}
diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs
index 92236e924418..b89f427b0503 100644
--- a/src/catalog/src/remote/manager.rs
+++ b/src/catalog/src/remote/manager.rs
@@ -12,21 +12,21 @@ use common_catalog::{
SchemaKey, SchemaValue, TableKey, TableValue,
};
use common_telemetry::{debug, info};
-use datatypes::schema::Schema;
use futures::Stream;
use futures_util::StreamExt;
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineRef};
use table::metadata::{TableId, TableVersion};
use table::requests::{CreateTableRequest, OpenTableRequest};
+use table::table::numbers::NumbersTable;
use table::TableRef;
use tokio::sync::Mutex;
-use crate::error::Result;
use crate::error::{
CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, OpenTableSnafu,
SchemaNotFoundSnafu, TableExistsSnafu,
};
+use crate::error::{InvalidTableSchemaSnafu, Result};
use crate::remote::{Kv, KvBackendRef};
use crate::{
handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
@@ -164,7 +164,7 @@ impl RemoteCatalogManager {
/// Fetch catalogs/schemas/tables from remote catalog manager along with max table id allocated.
async fn initiate_catalogs(&self) -> Result<(HashMap<String, CatalogProviderRef>, TableId)> {
let mut res = HashMap::new();
- let max_table_id = MIN_USER_TABLE_ID;
+ let max_table_id = MIN_USER_TABLE_ID - 1;
// initiate default catalog and schema
let default_catalog = self.initiate_default_catalog().await?;
@@ -246,7 +246,7 @@ impl RemoteCatalogManager {
async fn initiate_default_catalog(&self) -> Result<CatalogProviderRef> {
let default_catalog = self.new_catalog_provider(DEFAULT_CATALOG_NAME);
let default_schema = self.new_schema_provider(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME);
- default_catalog.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema)?;
+ default_catalog.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema.clone())?;
let schema_key = SchemaKey {
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
@@ -310,13 +310,22 @@ impl RemoteCatalogManager {
})? {
Some(table) => Ok(table),
None => {
+ let schema = meta
+ .schema
+ .clone()
+ .try_into()
+ .context(InvalidTableSchemaSnafu {
+ table_info: format!("{}.{}.{}", catalog_name, schema_name, table_name,),
+ schema: meta.schema.clone(),
+ })?;
let req = CreateTableRequest {
id: *id,
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
- schema: Arc::new(Schema::new(meta.schema.column_schemas.clone())),
+ schema: Arc::new(schema),
+ region_numbers: meta.region_numbers.clone(),
primary_key_indices: meta.primary_key_indices.clone(),
create_if_not_exists: true,
table_options: meta.options.clone(),
@@ -352,6 +361,15 @@ impl CatalogManager for RemoteCatalogManager {
let mut system_table_requests = self.system_table_requests.lock().await;
handle_system_table_request(self, self.engine.clone(), &mut system_table_requests).await?;
info!("All system table opened");
+
+ self.catalog(DEFAULT_CATALOG_NAME)
+ .unwrap()
+ .unwrap()
+ .schema(DEFAULT_SCHEMA_NAME)
+ .unwrap()
+ .unwrap()
+ .register_table("numbers".to_string(), Arc::new(NumbersTable::default()))
+ .unwrap();
Ok(())
}
@@ -512,6 +530,7 @@ impl CatalogProvider for RemoteCatalogProvider {
.context(InvalidCatalogValueSnafu)?,
)
.await?;
+
let prev_schemas = schemas.load();
let mut new_schemas = HashMap::with_capacity(prev_schemas.len() + 1);
new_schemas.clone_from(&prev_schemas);
@@ -590,7 +609,7 @@ impl SchemaProvider for RemoteSchemaProvider {
meta: table_info.meta.clone().into(),
id: table_info.ident.table_id,
node_id: self.node_id,
- regions_ids: vec![],
+ regions_ids: table.table_info().meta.region_numbers.clone(),
};
let backend = self.backend.clone();
let mutex = self.mutex.clone();
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index 5fd802d1edea..66dc2e1116c0 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -96,6 +96,7 @@ impl SystemCatalogTable {
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
desc: Some("System catalog table".to_string()),
schema: schema.clone(),
+ region_numbers: vec![0],
primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX, TIMESTAMP_INDEX],
create_if_not_exists: true,
table_options: HashMap::new(),
diff --git a/src/catalog/tests/remote_catalog_tests.rs b/src/catalog/tests/remote_catalog_tests.rs
index 40b9de8eb3cf..b54d15275c16 100644
--- a/src/catalog/tests/remote_catalog_tests.rs
+++ b/src/catalog/tests/remote_catalog_tests.rs
@@ -114,6 +114,7 @@ mod tests {
table_name: table_name.clone(),
desc: None,
schema: table_schema.clone(),
+ region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
@@ -154,7 +155,7 @@ mod tests {
.schema(DEFAULT_SCHEMA_NAME)
.unwrap()
.unwrap();
- assert_eq!(Vec::<String>::new(), default_schema.table_names().unwrap());
+ assert_eq!(vec!["numbers"], default_schema.table_names().unwrap());
         // register a new table with a nonexistent catalog
let catalog_name = DEFAULT_CATALOG_NAME.to_string();
@@ -173,6 +174,7 @@ mod tests {
table_name: table_name.clone(),
desc: None,
schema: table_schema.clone(),
+ region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
@@ -188,7 +190,14 @@ mod tests {
table,
};
assert_eq!(1, catalog_manager.register_table(reg_req).await.unwrap());
- assert_eq!(vec![table_name], default_schema.table_names().unwrap());
+ assert_eq!(
+ HashSet::from([table_name, "numbers".to_string()]),
+ default_schema
+ .table_names()
+ .unwrap()
+ .into_iter()
+ .collect::<HashSet<_>>()
+ );
}
#[tokio::test]
@@ -225,6 +234,7 @@ mod tests {
table_name: "".to_string(),
desc: None,
schema: Arc::new(Schema::new(vec![])),
+ region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
table_options: Default::default(),
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index 6fd3682a67f5..7c0919c81a4f 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -33,6 +33,8 @@ impl SubCommand {
#[derive(Debug, Parser)]
struct StartCommand {
+ #[clap(long)]
+ node_id: Option<u64>,
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
@@ -41,6 +43,8 @@ struct StartCommand {
mysql_addr: Option<String>,
#[clap(long)]
postgres_addr: Option<String>,
+ #[clap(long)]
+ metasrv_addr: Option<String>,
#[clap(short, long)]
config_file: Option<String>,
}
@@ -71,6 +75,9 @@ impl TryFrom<StartCommand> for DatanodeOptions {
DatanodeOptions::default()
};
+ if let Some(node_id) = cmd.node_id {
+ opts.node_id = node_id;
+ }
if let Some(addr) = cmd.http_addr {
opts.http_addr = addr;
}
@@ -83,7 +90,9 @@ impl TryFrom<StartCommand> for DatanodeOptions {
if let Some(addr) = cmd.postgres_addr {
opts.postgres_addr = addr;
}
-
+ if let Some(addr) = cmd.metasrv_addr {
+ opts.meta_client_opts.metasrv_addr = addr;
+ }
Ok(opts)
}
}
@@ -97,10 +106,12 @@ mod tests {
#[test]
fn test_read_from_config_file() {
let cmd = StartCommand {
+ node_id: None,
http_addr: None,
rpc_addr: None,
mysql_addr: None,
postgres_addr: None,
+ metasrv_addr: None,
config_file: Some(format!(
"{}/../../config/datanode.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
@@ -112,6 +123,13 @@ mod tests {
assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal_dir);
assert_eq!("0.0.0.0:3306".to_string(), options.mysql_addr);
assert_eq!(4, options.mysql_runtime_size);
+ assert_eq!(
+ "1.1.1.1:3002".to_string(),
+ options.meta_client_opts.metasrv_addr
+ );
+ assert_eq!(5000, options.meta_client_opts.connect_timeout_millis);
+ assert_eq!(3000, options.meta_client_opts.timeout_millis);
+ assert!(options.meta_client_opts.tcp_nodelay);
assert_eq!("0.0.0.0:5432".to_string(), options.postgres_addr);
assert_eq!(4, options.postgres_runtime_size);
diff --git a/src/common/catalog/src/helper.rs b/src/common/catalog/src/helper.rs
index dfbd5ef2310c..db8e9b556516 100644
--- a/src/common/catalog/src/helper.rs
+++ b/src/common/catalog/src/helper.rs
@@ -102,7 +102,7 @@ impl TableKey {
pub struct TableValue {
pub id: TableId,
pub node_id: u64,
- pub regions_ids: Vec<u64>,
+ pub regions_ids: Vec<u32>,
pub meta: RawTableMeta,
}
@@ -278,6 +278,7 @@ mod tests {
engine_options: Default::default(),
value_indices: vec![2, 3],
options: Default::default(),
+ region_numbers: vec![1],
};
let value = TableValue {
diff --git a/src/common/substrait/src/df_logical.rs b/src/common/substrait/src/df_logical.rs
index 2e746cdc8123..45adbb748c98 100644
--- a/src/common/substrait/src/df_logical.rs
+++ b/src/common/substrait/src/df_logical.rs
@@ -377,6 +377,7 @@ mod test {
table_name: table_name.to_string(),
desc: None,
schema: Arc::new(Schema::new(supported_types())),
+ region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: true,
table_options: Default::default(),
diff --git a/src/datanode/Cargo.toml b/src/datanode/Cargo.toml
index 031e78a424b0..0f6b1ae5d6c0 100644
--- a/src/datanode/Cargo.toml
+++ b/src/datanode/Cargo.toml
@@ -29,6 +29,8 @@ datatypes = { path = "../datatypes" }
futures = "0.3"
hyper = { version = "0.14", features = ["full"] }
log-store = { path = "../log-store" }
+meta-client = { path = "../meta-client" }
+meta-srv = { path = "../meta-srv", features = ["mock"] }
metrics = "0.20"
object-store = { path = "../object-store" }
query = { path = "../query" }
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 329db00ae27d..5286879c3ba7 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -15,13 +15,17 @@ pub enum ObjectStoreConfig {
impl Default for ObjectStoreConfig {
fn default() -> Self {
ObjectStoreConfig::File {
- data_dir: "/tmp/greptimedb/data/".to_string(),
+ data_dir: format!(
+ "/tmp/greptimedb/data/{}",
+ common_time::util::current_time_millis()
+ ),
}
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct DatanodeOptions {
+ pub node_id: u64,
pub http_addr: String,
pub rpc_addr: String,
pub rpc_runtime_size: usize,
@@ -29,6 +33,7 @@ pub struct DatanodeOptions {
pub mysql_runtime_size: usize,
pub postgres_addr: String,
pub postgres_runtime_size: usize,
+ pub meta_client_opts: MetaClientOpts,
pub wal_dir: String,
pub storage: ObjectStoreConfig,
}
@@ -36,6 +41,7 @@ pub struct DatanodeOptions {
impl Default for DatanodeOptions {
fn default() -> Self {
Self {
+ node_id: 0,
http_addr: "0.0.0.0:3000".to_string(),
rpc_addr: "0.0.0.0:3001".to_string(),
rpc_runtime_size: 8,
@@ -43,7 +49,11 @@ impl Default for DatanodeOptions {
mysql_runtime_size: 2,
postgres_addr: "0.0.0.0:5432".to_string(),
postgres_runtime_size: 2,
- wal_dir: "/tmp/greptimedb/wal".to_string(),
+ meta_client_opts: MetaClientOpts::default(),
+ wal_dir: format!(
+ "/tmp/greptimedb/wal/{}",
+ common_time::util::current_time_millis()
+ ),
storage: ObjectStoreConfig::default(),
}
}
@@ -72,3 +82,23 @@ impl Datanode {
self.services.start(&self.opts).await
}
}
+
+// Options for meta client in datanode instance.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct MetaClientOpts {
+ pub metasrv_addr: String,
+ pub timeout_millis: u64,
+ pub connect_timeout_millis: u64,
+ pub tcp_nodelay: bool,
+}
+
+impl Default for MetaClientOpts {
+ fn default() -> Self {
+ Self {
+ metasrv_addr: "127.0.0.1:3002".to_string(),
+ timeout_millis: 3_000u64,
+ connect_timeout_millis: 5_000u64,
+ tcp_nodelay: true,
+ }
+ }
+}
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index 408a1cbd2118..c2c4adacda11 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -279,6 +279,12 @@ pub enum Error {
table_name: String,
source: catalog::error::Error,
},
+
+ #[snafu(display("Failed to initialize meta client, source: {}", source))]
+ MetaClientInit {
+ #[snafu(backtrace)]
+ source: meta_client::error::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -346,6 +352,7 @@ impl ErrorExt for Error {
| Error::CollectRecordBatches { source } => source.status_code(),
Error::ArrowComputation { .. } => StatusCode::Unexpected,
+ Error::MetaClientInit { source, .. } => source.status_code(),
}
}
diff --git a/src/datanode/src/heartbeat.rs b/src/datanode/src/heartbeat.rs
new file mode 100644
index 000000000000..229742472f67
--- /dev/null
+++ b/src/datanode/src/heartbeat.rs
@@ -0,0 +1,97 @@
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::Arc;
+use std::time::Duration;
+
+use api::v1::meta::{HeartbeatRequest, HeartbeatResponse, Peer};
+use common_telemetry::{error, info, warn};
+use meta_client::client::{HeartbeatSender, MetaClient};
+use snafu::ResultExt;
+
+use crate::error::{MetaClientInitSnafu, Result};
+
+#[derive(Debug, Clone, Default)]
+pub struct HeartbeatTask {
+ node_id: u64,
+ server_addr: String,
+ started: Arc<AtomicBool>,
+ meta_client: MetaClient,
+ interval: u64,
+}
+
+impl HeartbeatTask {
+ /// Create a new heartbeat task instance.
+ pub fn new(node_id: u64, server_addr: String, meta_client: MetaClient) -> Self {
+ Self {
+ node_id,
+ server_addr,
+ started: Arc::new(AtomicBool::new(false)),
+ meta_client,
+ interval: 5_000, // default interval is set to 5 secs
+ }
+ }
+
+ pub async fn create_streams(meta_client: &MetaClient) -> Result<HeartbeatSender> {
+ let (tx, mut rx) = meta_client.heartbeat().await.context(MetaClientInitSnafu)?;
+ common_runtime::spawn_bg(async move {
+ while let Some(res) = match rx.message().await {
+ Ok(m) => m,
+ Err(e) => {
+ error!(e; "Error while reading heartbeat response");
+ None
+ }
+ } {
+ Self::handle_response(res).await;
+ }
+ info!("Heartbeat handling loop exit.")
+ });
+ Ok(tx)
+ }
+
+ async fn handle_response(resp: HeartbeatResponse) {
+ info!("heartbeat response: {:?}", resp);
+ }
+
+ /// Start heartbeat task, spawn background task.
+ pub async fn start(&self) -> Result<()> {
+ let started = self.started.clone();
+ if started
+ .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
+ .is_err()
+ {
+ warn!("Heartbeat task started multiple times");
+ return Ok(());
+ }
+ let interval = self.interval;
+ let node_id = self.node_id;
+ let server_addr = self.server_addr.clone();
+ let meta_client = self.meta_client.clone();
+
+ let mut tx = Self::create_streams(&meta_client).await?;
+ common_runtime::spawn_bg(async move {
+ while started.load(Ordering::Acquire) {
+ let req = HeartbeatRequest {
+ peer: Some(Peer {
+ id: node_id,
+ addr: server_addr.clone(),
+ }),
+ ..Default::default()
+ };
+ if let Err(e) = tx.send(req).await {
+ error!("Failed to send heartbeat to metasrv, error: {:?}", e);
+ match Self::create_streams(&meta_client).await {
+ Ok(new_tx) => {
+ info!("Reconnected to metasrv");
+ tx = new_tx;
+ }
+ Err(e) => {
+ error!(e;"Failed to reconnect to metasrv!");
+ }
+ }
+ }
+ tokio::time::sleep(Duration::from_millis(interval)).await;
+ }
+ });
+
+ Ok(())
+ }
+}
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 4c88fdb5ede0..9c8a0897e2f3 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -1,8 +1,12 @@
+use std::time::Duration;
use std::{fs, path, sync::Arc};
+use catalog::remote::MetaKvBackend;
use catalog::CatalogManagerRef;
+use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_telemetry::logging::info;
use log_store::fs::{config::LogConfig, log::LocalFileLogStore};
+use meta_client::client::{MetaClient, MetaClientBuilder};
use object_store::{services::fs::Builder, util, ObjectStore};
use query::query_engine::{QueryEngineFactory, QueryEngineRef};
use snafu::prelude::*;
@@ -10,8 +14,9 @@ use storage::{config::EngineConfig as StorageEngineConfig, EngineImpl};
use table_engine::config::EngineConfig as TableEngineConfig;
use table_engine::engine::MitoEngine;
-use crate::datanode::{DatanodeOptions, ObjectStoreConfig};
-use crate::error::{self, NewCatalogSnafu, Result};
+use crate::datanode::{DatanodeOptions, MetaClientOpts, ObjectStoreConfig};
+use crate::error::{self, MetaClientInitSnafu, NewCatalogSnafu, Result};
+use crate::heartbeat::HeartbeatTask;
use crate::script::ScriptExecutor;
use crate::server::grpc::plan::PhysicalPlanner;
use crate::sql::SqlHandler;
@@ -19,15 +24,18 @@ use crate::sql::SqlHandler;
mod grpc;
mod sql;
-type DefaultEngine = MitoEngine<EngineImpl<LocalFileLogStore>>;
+pub(crate) type DefaultEngine = MitoEngine<EngineImpl<LocalFileLogStore>>;
// An abstraction to read/write services.
pub struct Instance {
- query_engine: QueryEngineRef,
- sql_handler: SqlHandler,
- catalog_manager: CatalogManagerRef,
- physical_planner: PhysicalPlanner,
- script_executor: ScriptExecutor,
+ pub(crate) query_engine: QueryEngineRef,
+ pub(crate) sql_handler: SqlHandler,
+ pub(crate) catalog_manager: CatalogManagerRef,
+ pub(crate) physical_planner: PhysicalPlanner,
+ pub(crate) script_executor: ScriptExecutor,
+ #[allow(unused)]
+ pub(crate) meta_client: MetaClient,
+ pub(crate) heartbeat_task: HeartbeatTask,
}
pub type InstanceRef = Arc<Instance>;
@@ -36,6 +44,7 @@ impl Instance {
pub async fn new(opts: &DatanodeOptions) -> Result<Self> {
let object_store = new_object_store(&opts.storage).await?;
let log_store = create_local_file_log_store(opts).await?;
+ let meta_client = new_metasrv_client(opts.node_id, &opts.meta_client_opts).await?;
let table_engine = Arc::new(DefaultEngine::new(
TableEngineConfig::default(),
@@ -46,22 +55,34 @@ impl Instance {
),
object_store,
));
- let catalog_manager = Arc::new(
- catalog::local::LocalCatalogManager::try_new(table_engine.clone())
- .await
- .context(NewCatalogSnafu)?,
- );
+
+ // create remote catalog manager
+ let catalog_manager = Arc::new(catalog::remote::RemoteCatalogManager::new(
+ table_engine.clone(),
+ opts.node_id,
+ Arc::new(MetaKvBackend {
+ client: meta_client.clone(),
+ }),
+ ));
+
let factory = QueryEngineFactory::new(catalog_manager.clone());
let query_engine = factory.query_engine().clone();
let script_executor =
ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?;
+ let heartbeat_task = HeartbeatTask::new(
+ opts.node_id, /*node id not set*/
+ opts.rpc_addr.clone(),
+ meta_client.clone(),
+ );
Ok(Self {
query_engine: query_engine.clone(),
sql_handler: SqlHandler::new(table_engine, catalog_manager.clone()),
catalog_manager,
physical_planner: PhysicalPlanner::new(query_engine),
script_executor,
+ meta_client,
+ heartbeat_task,
})
}
@@ -70,6 +91,7 @@ impl Instance {
.start()
.await
.context(NewCatalogSnafu)?;
+ self.heartbeat_task.start().await?;
Ok(())
}
@@ -80,47 +102,9 @@ impl Instance {
pub fn catalog_manager(&self) -> &CatalogManagerRef {
&self.catalog_manager
}
-
- // This method is used in other crate's testing codes, so move it out of "cfg(test)".
- // TODO(LFC): Delete it when callers no longer need it.
- pub async fn new_mock() -> Result<Self> {
- use table_engine::table::test_util::new_test_object_store;
- use table_engine::table::test_util::MockEngine;
- use table_engine::table::test_util::MockMitoEngine;
-
- let (_dir, object_store) = new_test_object_store("setup_mock_engine_and_table").await;
- let mock_engine = Arc::new(MockMitoEngine::new(
- TableEngineConfig::default(),
- MockEngine::default(),
- object_store,
- ));
-
- let catalog_manager = Arc::new(
- catalog::local::manager::LocalCatalogManager::try_new(mock_engine.clone())
- .await
- .unwrap(),
- );
-
- let factory = QueryEngineFactory::new(catalog_manager.clone());
- let query_engine = factory.query_engine().clone();
-
- let sql_handler = SqlHandler::new(mock_engine.clone(), catalog_manager.clone());
- let physical_planner = PhysicalPlanner::new(query_engine.clone());
- let script_executor = ScriptExecutor::new(catalog_manager.clone(), query_engine.clone())
- .await
- .unwrap();
-
- Ok(Self {
- query_engine,
- sql_handler,
- catalog_manager,
- physical_planner,
- script_executor,
- })
- }
}
-async fn new_object_store(store_config: &ObjectStoreConfig) -> Result<ObjectStore> {
+pub(crate) async fn new_object_store(store_config: &ObjectStoreConfig) -> Result<ObjectStore> {
// TODO(dennis): supports other backend
let data_dir = util::normalize_dir(match store_config {
ObjectStoreConfig::File { data_dir } => data_dir,
@@ -139,7 +123,38 @@ async fn new_object_store(store_config: &ObjectStoreConfig) -> Result<ObjectStor
Ok(ObjectStore::new(accessor))
}
-async fn create_local_file_log_store(opts: &DatanodeOptions) -> Result<LocalFileLogStore> {
+/// Create metasrv client instance and spawn heartbeat loop.
+async fn new_metasrv_client(node_id: u64, meta_config: &MetaClientOpts) -> Result<MetaClient> {
+ let cluster_id = 0; // TODO(hl): read from config
+ let member_id = node_id;
+
+ let config = ChannelConfig::new()
+ .timeout(Duration::from_millis(meta_config.timeout_millis))
+ .connect_timeout(Duration::from_millis(meta_config.connect_timeout_millis))
+ .tcp_nodelay(meta_config.tcp_nodelay);
+ let channel_manager = ChannelManager::with_config(config);
+ let mut meta_client = MetaClientBuilder::new(cluster_id, member_id)
+ .enable_heartbeat()
+ .enable_router()
+ .enable_store()
+ .channel_manager(channel_manager)
+ .build();
+ meta_client
+ .start(&[&meta_config.metasrv_addr])
+ .await
+ .context(MetaClientInitSnafu)?;
+
+ // required only when the heartbeat_client is enabled
+ meta_client
+ .ask_leader()
+ .await
+ .context(MetaClientInitSnafu)?;
+ Ok(meta_client)
+}
+
+pub(crate) async fn create_local_file_log_store(
+ opts: &DatanodeOptions,
+) -> Result<LocalFileLogStore> {
// create WAL directory
fs::create_dir_all(path::Path::new(&opts.wal_dir))
.context(error::CreateDirSnafu { dir: &opts.wal_dir })?;
diff --git a/src/datanode/src/lib.rs b/src/datanode/src/lib.rs
index 91812978875a..bcf7097377b7 100644
--- a/src/datanode/src/lib.rs
+++ b/src/datanode/src/lib.rs
@@ -2,8 +2,10 @@
pub mod datanode;
pub mod error;
+mod heartbeat;
pub mod instance;
mod metric;
+mod mock;
mod script;
pub mod server;
mod sql;
diff --git a/src/datanode/src/mock.rs b/src/datanode/src/mock.rs
new file mode 100644
index 000000000000..78ab01cb0bf8
--- /dev/null
+++ b/src/datanode/src/mock.rs
@@ -0,0 +1,123 @@
+use std::sync::Arc;
+
+use catalog::remote::MetaKvBackend;
+use meta_client::client::{MetaClient, MetaClientBuilder};
+use query::QueryEngineFactory;
+use storage::config::EngineConfig as StorageEngineConfig;
+use storage::EngineImpl;
+use table_engine::config::EngineConfig as TableEngineConfig;
+
+use crate::datanode::DatanodeOptions;
+use crate::error::Result;
+use crate::heartbeat::HeartbeatTask;
+use crate::instance::{create_local_file_log_store, new_object_store, DefaultEngine, Instance};
+use crate::script::ScriptExecutor;
+use crate::server::grpc::plan::PhysicalPlanner;
+use crate::sql::SqlHandler;
+
+impl Instance {
+ // This method is used in other crate's testing codes, so move it out of "cfg(test)".
+ // TODO(LFC): Delete it when callers no longer need it.
+ pub async fn new_mock() -> Result<Self> {
+ use table_engine::table::test_util::new_test_object_store;
+ use table_engine::table::test_util::MockEngine;
+ use table_engine::table::test_util::MockMitoEngine;
+
+ let meta_client = mock_meta_client().await;
+ let (_dir, object_store) = new_test_object_store("setup_mock_engine_and_table").await;
+ let mock_engine = Arc::new(MockMitoEngine::new(
+ TableEngineConfig::default(),
+ MockEngine::default(),
+ object_store,
+ ));
+
+ let catalog_manager = Arc::new(
+ catalog::local::manager::LocalCatalogManager::try_new(mock_engine.clone())
+ .await
+ .unwrap(),
+ );
+
+ let factory = QueryEngineFactory::new(catalog_manager.clone());
+ let query_engine = factory.query_engine().clone();
+
+ let sql_handler = SqlHandler::new(mock_engine.clone(), catalog_manager.clone());
+ let physical_planner = PhysicalPlanner::new(query_engine.clone());
+ let script_executor = ScriptExecutor::new(catalog_manager.clone(), query_engine.clone())
+ .await
+ .unwrap();
+
+ let heartbeat_task =
+ HeartbeatTask::new(0, "127.0.0.1:3302".to_string(), meta_client.clone());
+ Ok(Self {
+ query_engine,
+ sql_handler,
+ catalog_manager,
+ physical_planner,
+ script_executor,
+ meta_client,
+ heartbeat_task,
+ })
+ }
+
+ pub async fn with_mock_meta_client(opts: &DatanodeOptions) -> Result<Self> {
+ let object_store = new_object_store(&opts.storage).await?;
+ let log_store = create_local_file_log_store(opts).await?;
+ let meta_client = mock_meta_client().await;
+ let table_engine = Arc::new(DefaultEngine::new(
+ TableEngineConfig::default(),
+ EngineImpl::new(
+ StorageEngineConfig::default(),
+ Arc::new(log_store),
+ object_store.clone(),
+ ),
+ object_store,
+ ));
+
+ // create remote catalog manager
+ let catalog_manager = Arc::new(catalog::remote::RemoteCatalogManager::new(
+ table_engine.clone(),
+ opts.node_id,
+ Arc::new(MetaKvBackend {
+ client: meta_client.clone(),
+ }),
+ ));
+
+ let factory = QueryEngineFactory::new(catalog_manager.clone());
+ let query_engine = factory.query_engine().clone();
+ let script_executor =
+ ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?;
+
+ let heartbeat_task =
+ HeartbeatTask::new(opts.node_id, opts.rpc_addr.clone(), meta_client.clone());
+ Ok(Self {
+ query_engine: query_engine.clone(),
+ sql_handler: SqlHandler::new(table_engine, catalog_manager.clone()),
+ catalog_manager,
+ physical_planner: PhysicalPlanner::new(query_engine),
+ script_executor,
+ meta_client,
+ heartbeat_task,
+ })
+ }
+}
+
+async fn mock_meta_client() -> MetaClient {
+ let mock_info = meta_srv::mocks::mock_with_memstore().await;
+ let meta_srv::mocks::MockInfo {
+ server_addr,
+ channel_manager,
+ } = mock_info;
+
+ let id = (1000u64, 2000u64);
+ let mut meta_client = MetaClientBuilder::new(id.0, id.1)
+ .enable_heartbeat()
+ .enable_router()
+ .enable_store()
+ .channel_manager(channel_manager)
+ .build();
+ meta_client.start(&[&server_addr]).await.unwrap();
+    // required only when the heartbeat_client is enabled
+ meta_client.ask_leader().await.unwrap();
+
+ meta_client
+}
diff --git a/src/datanode/src/server/grpc/ddl.rs b/src/datanode/src/server/grpc/ddl.rs
index 4d52b3976784..87e6e161a2c6 100644
--- a/src/datanode/src/server/grpc/ddl.rs
+++ b/src/datanode/src/server/grpc/ddl.rs
@@ -84,6 +84,14 @@ impl Instance {
let schema_name = expr
.schema_name
.unwrap_or_else(|| DEFAULT_SCHEMA_NAME.to_string());
+
+ let region_id = expr
+ .table_options
+ .get(&"region_id".to_string())
+ .unwrap()
+ .parse::<u32>()
+ .unwrap();
+
Ok(CreateTableRequest {
id: table_id,
catalog_name,
@@ -91,6 +99,7 @@ impl Instance {
table_name: expr.table_name,
desc: expr.desc,
schema,
+ region_numbers: vec![region_id],
primary_key_indices,
create_if_not_exists: expr.create_if_not_exists,
table_options: expr.table_options,
@@ -179,10 +188,11 @@ mod tests {
use super::*;
use crate::tests::test_util;
- #[tokio::test]
+ #[tokio::test(flavor = "multi_thread")]
async fn test_create_expr_to_request() {
+ common_telemetry::init_default_ut_logging();
let (opts, _guard) = test_util::create_tmp_dir_and_datanode_opts("create_expr_to_request");
- let instance = Instance::new(&opts).await.unwrap();
+ let instance = Instance::with_mock_meta_client(&opts).await.unwrap();
instance.start().await.unwrap();
let expr = testing_create_expr();
@@ -291,6 +301,9 @@ mod tests {
default_constraint: None,
},
];
+ let table_options = [("region_id".to_string(), "0".to_string())]
+ .into_iter()
+ .collect::<HashMap<_, _>>();
CreateExpr {
catalog_name: None,
schema_name: None,
@@ -300,7 +313,7 @@ mod tests {
time_index: "ts".to_string(),
primary_keys: vec!["ts".to_string(), "host".to_string()],
create_if_not_exists: true,
- table_options: HashMap::new(),
+ table_options,
}
}
diff --git a/src/datanode/src/server/grpc/insert.rs b/src/datanode/src/server/grpc/insert.rs
index 6007d6db5048..817f55ea44f8 100644
--- a/src/datanode/src/server/grpc/insert.rs
+++ b/src/datanode/src/server/grpc/insert.rs
@@ -168,6 +168,7 @@ pub fn build_create_table_request(
create_if_not_exists: true,
primary_key_indices,
table_options: HashMap::new(),
+ region_numbers: vec![0],
});
}
diff --git a/src/datanode/src/sql/create.rs b/src/datanode/src/sql/create.rs
index e2e880fa6ead..fb24519bc2c2 100644
--- a/src/datanode/src/sql/create.rs
+++ b/src/datanode/src/sql/create.rs
@@ -155,6 +155,7 @@ impl SqlHandler {
table_name,
desc: None,
schema,
+ region_numbers: vec![0],
primary_key_indices: primary_keys,
create_if_not_exists: stmt.if_not_exists,
table_options: HashMap::new(),
diff --git a/src/datanode/src/tests/grpc_test.rs b/src/datanode/src/tests/grpc_test.rs
index 74e25658ba2f..cb19bd5291d9 100644
--- a/src/datanode/src/tests/grpc_test.rs
+++ b/src/datanode/src/tests/grpc_test.rs
@@ -24,7 +24,7 @@ async fn setup_grpc_server(name: &str, port: usize) -> (String, TestGuard, Arc<G
let (mut opts, guard) = test_util::create_tmp_dir_and_datanode_opts(name);
let addr = format!("127.0.0.1:{}", port);
opts.rpc_addr = addr.clone();
- let instance = Arc::new(Instance::new(&opts).await.unwrap());
+ let instance = Arc::new(Instance::with_mock_meta_client(&opts).await.unwrap());
instance.start().await.unwrap();
let addr_cloned = addr.clone();
@@ -50,7 +50,7 @@ async fn setup_grpc_server(name: &str, port: usize) -> (String, TestGuard, Arc<G
(addr, guard, grpc_server)
}
-#[tokio::test]
+#[tokio::test(flavor = "multi_thread")]
async fn test_auto_create_table() {
let (addr, _guard, grpc_server) = setup_grpc_server("auto_create_table", 3991).await;
@@ -116,8 +116,9 @@ fn expect_data() -> (Column, Column, Column, Column) {
)
}
-#[tokio::test]
+#[tokio::test(flavor = "multi_thread")]
async fn test_insert_and_select() {
+ common_telemetry::init_default_ut_logging();
let (addr, _guard, grpc_server) = setup_grpc_server("insert_and_select", 3990).await;
let grpc_client = Client::with_urls(vec![addr]);
@@ -247,6 +248,6 @@ fn testing_create_expr() -> CreateExpr {
time_index: "ts".to_string(),
primary_keys: vec!["ts".to_string(), "host".to_string()],
create_if_not_exists: true,
- table_options: HashMap::new(),
+ table_options: HashMap::from([("region_id".to_string(), "0".to_string())]),
}
}
diff --git a/src/datanode/src/tests/http_test.rs b/src/datanode/src/tests/http_test.rs
index 58e52689bc41..b84ec011970b 100644
--- a/src/datanode/src/tests/http_test.rs
+++ b/src/datanode/src/tests/http_test.rs
@@ -14,7 +14,7 @@ use crate::tests::test_util;
async fn make_test_app(name: &str) -> (Router, TestGuard) {
let (opts, guard) = test_util::create_tmp_dir_and_datanode_opts(name);
- let instance = Arc::new(Instance::new(&opts).await.unwrap());
+ let instance = Arc::new(Instance::with_mock_meta_client(&opts).await.unwrap());
instance.start().await.unwrap();
test_util::create_test_table(&instance, ConcreteDataType::timestamp_millis_datatype())
.await
@@ -23,7 +23,7 @@ async fn make_test_app(name: &str) -> (Router, TestGuard) {
(http_server.make_app(), guard)
}
-#[tokio::test]
+#[tokio::test(flavor = "multi_thread")]
async fn test_sql_api() {
common_telemetry::init_default_ut_logging();
let (app, _guard) = make_test_app("sql_api").await;
@@ -83,7 +83,7 @@ async fn test_sql_api() {
);
}
-#[tokio::test]
+#[tokio::test(flavor = "multi_thread")]
async fn test_metrics_api() {
common_telemetry::init_default_ut_logging();
common_telemetry::init_default_metrics_recorder();
@@ -104,7 +104,7 @@ async fn test_metrics_api() {
assert!(body.contains("datanode_handle_sql_elapsed"));
}
-#[tokio::test]
+#[tokio::test(flavor = "multi_thread")]
async fn test_scripts_api() {
common_telemetry::init_default_ut_logging();
let (app, _guard) = make_test_app("scripts_api").await;
diff --git a/src/datanode/src/tests/instance_test.rs b/src/datanode/src/tests/instance_test.rs
index a65e867780bf..4bbaf9d14d73 100644
--- a/src/datanode/src/tests/instance_test.rs
+++ b/src/datanode/src/tests/instance_test.rs
@@ -1,20 +1,18 @@
use arrow::array::{Int64Array, UInt64Array};
use common_query::Output;
use common_recordbatch::util;
-use datafusion::arrow_print;
-use datafusion_common::record_batch::RecordBatch as DfRecordBatch;
use datatypes::arrow_array::StringArray;
use datatypes::prelude::ConcreteDataType;
use crate::instance::Instance;
use crate::tests::test_util;
-#[tokio::test]
+#[tokio::test(flavor = "multi_thread")]
async fn test_execute_insert() {
common_telemetry::init_default_ut_logging();
let (opts, _guard) = test_util::create_tmp_dir_and_datanode_opts("execute_insert");
- let instance = Instance::new(&opts).await.unwrap();
+ let instance = Instance::with_mock_meta_client(&opts).await.unwrap();
instance.start().await.unwrap();
test_util::create_test_table(&instance, ConcreteDataType::timestamp_millis_datatype())
@@ -33,12 +31,12 @@ async fn test_execute_insert() {
assert!(matches!(output, Output::AffectedRows(2)));
}
-#[tokio::test]
+#[tokio::test(flavor = "multi_thread")]
async fn test_execute_insert_query_with_i64_timestamp() {
common_telemetry::init_default_ut_logging();
let (opts, _guard) = test_util::create_tmp_dir_and_datanode_opts("insert_query_i64_timestamp");
- let instance = Instance::new(&opts).await.unwrap();
+ let instance = Instance::with_mock_meta_client(&opts).await.unwrap();
instance.start().await.unwrap();
test_util::create_test_table(&instance, ConcreteDataType::int64_datatype())
@@ -72,10 +70,10 @@ async fn test_execute_insert_query_with_i64_timestamp() {
}
}
-#[tokio::test]
+#[tokio::test(flavor = "multi_thread")]
async fn test_execute_query() {
let (opts, _guard) = test_util::create_tmp_dir_and_datanode_opts("execute_query");
- let instance = Instance::new(&opts).await.unwrap();
+ let instance = Instance::with_mock_meta_client(&opts).await.unwrap();
instance.start().await.unwrap();
let output = instance
@@ -98,11 +96,11 @@ async fn test_execute_query() {
}
}
-#[tokio::test]
+#[tokio::test(flavor = "multi_thread")]
async fn test_execute_show_databases_tables() {
let (opts, _guard) =
test_util::create_tmp_dir_and_datanode_opts("execute_show_databases_tables");
- let instance = Instance::new(&opts).await.unwrap();
+ let instance = Instance::with_mock_meta_client(&opts).await.unwrap();
instance.start().await.unwrap();
let output = instance.execute_sql("show databases").await.unwrap();
@@ -188,12 +186,12 @@ async fn test_execute_show_databases_tables() {
}
}
-#[tokio::test]
+#[tokio::test(flavor = "multi_thread")]
pub async fn test_execute_create() {
common_telemetry::init_default_ut_logging();
let (opts, _guard) = test_util::create_tmp_dir_and_datanode_opts("execute_create");
- let instance = Instance::new(&opts).await.unwrap();
+ let instance = Instance::with_mock_meta_client(&opts).await.unwrap();
instance.start().await.unwrap();
let output = instance
@@ -212,13 +210,13 @@ pub async fn test_execute_create() {
assert!(matches!(output, Output::AffectedRows(1)));
}
-#[tokio::test]
+#[tokio::test(flavor = "multi_thread")]
pub async fn test_create_table_illegal_timestamp_type() {
common_telemetry::init_default_ut_logging();
let (opts, _guard) =
test_util::create_tmp_dir_and_datanode_opts("create_table_illegal_timestamp_type");
- let instance = Instance::new(&opts).await.unwrap();
+ let instance = Instance::with_mock_meta_client(&opts).await.unwrap();
instance.start().await.unwrap();
let output = instance
@@ -244,6 +242,8 @@ pub async fn test_create_table_illegal_timestamp_type() {
#[tokio::test]
async fn test_alter_table() {
+ use datafusion::arrow_print;
+ use datafusion_common::record_batch::RecordBatch as DfRecordBatch;
// TODO(LFC) Use real Mito engine when we can alter its region schema,
// and delete the `new_mock` method.
let instance = Instance::new_mock().await.unwrap();
diff --git a/src/datanode/src/tests/test_util.rs b/src/datanode/src/tests/test_util.rs
index ec8f9f66141f..d54f15f8c190 100644
--- a/src/datanode/src/tests/test_util.rs
+++ b/src/datanode/src/tests/test_util.rs
@@ -72,6 +72,7 @@ pub async fn create_test_table(instance: &Instance, ts_type: ConcreteDataType) -
create_if_not_exists: true,
primary_key_indices: vec![3, 0], // "host" and "ts" are primary keys
table_options: HashMap::new(),
+ region_numbers: vec![0],
},
)
.await
diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml
index 7f5b08dc813a..c3676864027f 100644
--- a/src/frontend/Cargo.toml
+++ b/src/frontend/Cargo.toml
@@ -21,9 +21,9 @@ datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch =
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2" }
datatypes = { path = "../datatypes" }
+itertools = "0.10"
openmetrics-parser = "0.4"
prost = "0.11"
-itertools = "0.10"
query = { path = "../query" }
serde = "1.0"
servers = { path = "../servers" }
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index d840222a5942..ffc420d13935 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -161,7 +161,10 @@ fn create_to_expr(create: CreateTable) -> Result<CreateExpr> {
primary_keys: find_primary_keys(&create.constraints)?,
create_if_not_exists: create.if_not_exists,
// TODO(LFC): Fill in other table options.
- table_options: HashMap::from([("engine".to_string(), create.engine)]),
+ table_options: HashMap::from([
+ ("engine".to_string(), create.engine),
+ ("region_id".to_string(), "0".to_string()),
+ ]),
..Default::default()
};
Ok(expr)
@@ -550,12 +553,15 @@ mod tests {
default_constraint: None,
},
];
+ let mut table_options = HashMap::with_capacity(1);
+ table_options.insert("region_id".to_string(), "0".to_string());
CreateExpr {
table_name: "demo".to_string(),
column_defs,
time_index: "ts".to_string(),
primary_keys: vec!["ts".to_string(), "host".to_string()],
create_if_not_exists: true,
+ table_options,
..Default::default()
}
}
diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs
index 879e058ea170..39e8a379ace9 100644
--- a/src/frontend/src/table.rs
+++ b/src/frontend/src/table.rs
@@ -328,7 +328,7 @@ mod test {
use super::*;
use crate::partitioning::range::RangePartitionRule;
- #[tokio::test]
+ #[tokio::test(flavor = "multi_thread")]
async fn test_dist_table_scan() {
let table = Arc::new(new_dist_table().await);
@@ -475,7 +475,7 @@ mod test {
..Default::default()
};
- let instance = Arc::new(Instance::new(&opts).await.unwrap());
+ let instance = Arc::new(Instance::with_mock_meta_client(&opts).await.unwrap());
instance.start().await.unwrap();
let catalog_manager = instance.catalog_manager().clone();
@@ -498,7 +498,7 @@ mod test {
)
}
- #[tokio::test]
+ #[tokio::test(flavor = "multi_thread")]
async fn test_find_regions() {
let table = new_dist_table().await;
diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs
index 10a5b34e58ed..0d1e3cbc197d 100644
--- a/src/meta-client/src/client.rs
+++ b/src/meta-client/src/client.rs
@@ -11,8 +11,8 @@ use router::Client as RouterClient;
use snafu::OptionExt;
use store::Client as StoreClient;
-use self::heartbeat::HeartbeatSender;
-use self::heartbeat::HeartbeatStream;
+pub use self::heartbeat::HeartbeatSender;
+pub use self::heartbeat::HeartbeatStream;
use crate::error;
use crate::error::Result;
use crate::rpc::BatchPutRequest;
diff --git a/src/meta-client/src/rpc.rs b/src/meta-client/src/rpc.rs
index 0e34f0057c1d..3fa5b1a60511 100644
--- a/src/meta-client/src/rpc.rs
+++ b/src/meta-client/src/rpc.rs
@@ -1,6 +1,6 @@
mod router;
mod store;
-mod util;
+pub mod util;
use api::v1::meta::KeyValue as PbKeyValue;
use api::v1::meta::Peer as PbPeer;
diff --git a/src/script/src/table.rs b/src/script/src/table.rs
index fec37e666748..19b58178df19 100644
--- a/src/script/src/table.rs
+++ b/src/script/src/table.rs
@@ -47,6 +47,7 @@ impl ScriptsTable {
desc: Some("Scripts table".to_string()),
schema,
// name and timestamp as primary key
+ region_numbers: vec![0],
primary_key_indices: vec![0, 3],
create_if_not_exists: true,
table_options: HashMap::default(),
diff --git a/src/table-engine/src/engine.rs b/src/table-engine/src/engine.rs
index beb3ea957865..09dcd13ffa5d 100644
--- a/src/table-engine/src/engine.rs
+++ b/src/table-engine/src/engine.rs
@@ -270,7 +270,8 @@ impl<S: StorageEngine> MitoEngineInner<S> {
let table_id = request.id;
         // TODO(dennis): support multiple regions;
- let region_number = 0;
+ assert_eq!(1, request.region_numbers.len());
+ let region_number = request.region_numbers[0];
let region_id = region_id(table_id, region_number);
let region_name = region_name(table_id, region_number);
@@ -311,6 +312,7 @@ impl<S: StorageEngine> MitoEngineInner<S> {
.engine(MITO_ENGINE)
.next_column_id(next_column_id)
.primary_key_indices(request.primary_key_indices.clone())
+ .region_numbers(vec![region_number])
.build()
.context(error::BuildTableMetaSnafu { table_name })?;
@@ -495,6 +497,7 @@ mod tests {
create_if_not_exists: true,
primary_key_indices: Vec::default(),
table_options: HashMap::new(),
+ region_numbers: vec![0],
},
)
.await
@@ -753,6 +756,7 @@ mod tests {
desc: None,
primary_key_indices: Vec::default(),
table_options: HashMap::new(),
+ region_numbers: vec![0],
};
let created_table = table_engine.create_table(&ctx, request).await.unwrap();
@@ -776,6 +780,7 @@ mod tests {
desc: None,
primary_key_indices: Vec::default(),
table_options: HashMap::new(),
+ region_numbers: vec![0],
};
let result = table_engine.create_table(&ctx, request).await;
diff --git a/src/table-engine/src/table.rs b/src/table-engine/src/table.rs
index 2a6e23997082..52ea98dc947f 100644
--- a/src/table-engine/src/table.rs
+++ b/src/table-engine/src/table.rs
@@ -484,10 +484,10 @@ impl<R: Region> MitoTable<R> {
) -> Result<MitoTable<R>> {
let manifest = TableManifest::new(&table_manifest_dir(table_name), object_store);
- let table_info = Self::recover_table_info(table_name, &manifest)
+ let mut table_info = Self::recover_table_info(table_name, &manifest)
.await?
.context(TableInfoNotFoundSnafu { table_name })?;
-
+ table_info.meta.region_numbers = vec![(region.id() & 0xFFFFFFFF) as u32];
Ok(MitoTable::new(table_info, region, manifest))
}
diff --git a/src/table-engine/src/table/test_util.rs b/src/table-engine/src/table/test_util.rs
index dd4a3ac3bb57..00e6a08c977a 100644
--- a/src/table-engine/src/table/test_util.rs
+++ b/src/table-engine/src/table/test_util.rs
@@ -103,6 +103,7 @@ pub async fn setup_test_engine_and_table() -> (
create_if_not_exists: true,
primary_key_indices: Vec::default(),
table_options: HashMap::new(),
+ region_numbers: vec![0],
},
)
.await
@@ -135,6 +136,7 @@ pub async fn setup_mock_engine_and_table(
create_if_not_exists: true,
primary_key_indices: Vec::default(),
table_options: HashMap::new(),
+ region_numbers: vec![0],
},
)
.await
diff --git a/src/table/src/metadata.rs b/src/table/src/metadata.rs
index 9212f6265a25..4a26c16d8b7c 100644
--- a/src/table/src/metadata.rs
+++ b/src/table/src/metadata.rs
@@ -55,6 +55,8 @@ pub struct TableMeta {
pub value_indices: Vec<usize>,
#[builder(default, setter(into))]
pub engine: String,
+ #[builder(default, setter(into))]
+ pub region_numbers: Vec<u32>,
pub next_column_id: ColumnId,
/// Options for table engine.
#[builder(default)]
@@ -162,6 +164,7 @@ pub struct RawTableMeta {
pub value_indices: Vec<usize>,
pub engine: String,
pub next_column_id: ColumnId,
+ pub region_numbers: Vec<u32>,
pub engine_options: HashMap<String, String>,
pub options: HashMap<String, String>,
pub created_on: DateTime<Utc>,
@@ -175,6 +178,7 @@ impl From<TableMeta> for RawTableMeta {
value_indices: meta.value_indices,
engine: meta.engine,
next_column_id: meta.next_column_id,
+ region_numbers: meta.region_numbers,
engine_options: meta.engine_options,
options: meta.options,
created_on: meta.created_on,
@@ -191,6 +195,7 @@ impl TryFrom<RawTableMeta> for TableMeta {
primary_key_indices: raw.primary_key_indices,
value_indices: raw.value_indices,
engine: raw.engine,
+ region_numbers: vec![],
next_column_id: raw.next_column_id,
engine_options: raw.engine_options,
options: raw.options,
diff --git a/src/table/src/requests.rs b/src/table/src/requests.rs
index 115bf41ee327..449dca5d692f 100644
--- a/src/table/src/requests.rs
+++ b/src/table/src/requests.rs
@@ -22,6 +22,7 @@ pub struct CreateTableRequest {
pub table_name: String,
pub desc: Option<String>,
pub schema: SchemaRef,
+ pub region_numbers: Vec<u32>,
pub primary_key_indices: Vec<usize>,
pub create_if_not_exists: bool,
pub table_options: HashMap<String, String>,
diff --git a/src/table/src/table/numbers.rs b/src/table/src/table/numbers.rs
index 119ad78b9cbc..cf4f4303a2b4 100644
--- a/src/table/src/table/numbers.rs
+++ b/src/table/src/table/numbers.rs
@@ -8,12 +8,12 @@ use common_recordbatch::{RecordBatch, RecordBatchStream};
use datafusion_common::record_batch::RecordBatch as DfRecordBatch;
use datatypes::arrow::array::UInt32Array;
use datatypes::data_type::ConcreteDataType;
-use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
+use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
use futures::task::{Context, Poll};
use futures::Stream;
use crate::error::Result;
-use crate::metadata::TableInfoRef;
+use crate::metadata::{TableInfoBuilder, TableInfoRef, TableMetaBuilder, TableType};
use crate::table::scan::SimpleTableScan;
use crate::table::{Expr, Table};
@@ -31,7 +31,12 @@ impl Default for NumbersTable {
false,
)];
Self {
- schema: Arc::new(Schema::new(column_schemas)),
+ schema: Arc::new(
+ SchemaBuilder::try_from_columns(column_schemas)
+ .unwrap()
+ .build()
+ .unwrap(),
+ ),
}
}
}
@@ -47,7 +52,26 @@ impl Table for NumbersTable {
}
fn table_info(&self) -> TableInfoRef {
- unimplemented!()
+ Arc::new(
+ TableInfoBuilder::default()
+ .table_id(1)
+ .name("numbers")
+ .catalog_name("greptime")
+ .schema_name("public")
+ .table_version(0)
+ .table_type(TableType::Base)
+ .meta(
+ TableMetaBuilder::default()
+ .schema(self.schema.clone())
+ .region_numbers(vec![0])
+ .primary_key_indices(vec![0])
+ .next_column_id(1)
+ .build()
+ .unwrap(),
+ )
+ .build()
+ .unwrap(),
+ )
}
async fn scan(
|
feat
|
datanode heartbeat (#377)
|
fa4a497d758fb9b95646593b01163f49a4d3f498
|
2023-05-24 12:37:29
|
fys
|
feat: add cache for catalog kv backend (#1592)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index ce65ac5592c5..550f189efae5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1204,6 +1204,7 @@ dependencies = [
"meta-client",
"metrics",
"mito",
+ "moka 0.11.0",
"object-store",
"parking_lot",
"regex",
@@ -3113,7 +3114,7 @@ dependencies = [
"meter-core",
"meter-macros",
"mito",
- "moka",
+ "moka 0.9.7",
"object-store",
"openmetrics-parser",
"partition",
@@ -4775,6 +4776,15 @@ dependencies = [
"libc",
]
+[[package]]
+name = "mach2"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "maplit"
version = "1.0.2"
@@ -4985,7 +4995,7 @@ dependencies = [
"metrics-util",
"parking_lot",
"portable-atomic",
- "quanta",
+ "quanta 0.10.1",
"thiserror",
]
@@ -5016,7 +5026,7 @@ dependencies = [
"ordered-float 2.10.0",
"parking_lot",
"portable-atomic",
- "quanta",
+ "quanta 0.10.1",
"radix_trie",
"sketches-ddsketch",
]
@@ -5115,7 +5125,33 @@ dependencies = [
"num_cpus",
"once_cell",
"parking_lot",
- "quanta",
+ "quanta 0.10.1",
+ "rustc_version 0.4.0",
+ "scheduled-thread-pool",
+ "skeptic",
+ "smallvec",
+ "tagptr",
+ "thiserror",
+ "triomphe",
+ "uuid",
+]
+
+[[package]]
+name = "moka"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "934030d03f6191edbb4ba16835ccdb80d560788ac686570a8e2986a0fb59ded8"
+dependencies = [
+ "async-io",
+ "async-lock",
+ "crossbeam-channel",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+ "futures-util",
+ "num_cpus",
+ "once_cell",
+ "parking_lot",
+ "quanta 0.11.0",
"rustc_version 0.4.0",
"scheduled-thread-pool",
"skeptic",
@@ -5847,7 +5883,7 @@ dependencies = [
"datafusion-expr",
"datatypes",
"meta-client",
- "moka",
+ "moka 0.9.7",
"serde",
"serde_json",
"snafu",
@@ -6578,6 +6614,22 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "quanta"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8cc73c42f9314c4bdce450c77e6f09ecbddefbeddb1b5979ded332a3913ded33"
+dependencies = [
+ "crossbeam-utils",
+ "libc",
+ "mach2",
+ "once_cell",
+ "raw-cpuid",
+ "wasi 0.11.0+wasi-snapshot-preview1",
+ "web-sys",
+ "winapi",
+]
+
[[package]]
name = "query"
version = "0.2.0"
diff --git a/src/catalog/Cargo.toml b/src/catalog/Cargo.toml
index 5ceb7b2954a0..115b4f8c18a0 100644
--- a/src/catalog/Cargo.toml
+++ b/src/catalog/Cargo.toml
@@ -29,6 +29,7 @@ key-lock = "0.1"
lazy_static = "1.4"
meta-client = { path = "../meta-client" }
metrics.workspace = true
+moka = { version = "0.11", features = ["future"] }
parking_lot = "0.12"
regex = "1.6"
serde = "1.0"
diff --git a/src/catalog/src/error.rs b/src/catalog/src/error.rs
index 072f382f36fb..79b2c57ab3f9 100644
--- a/src/catalog/src/error.rs
+++ b/src/catalog/src/error.rs
@@ -233,6 +233,9 @@ pub enum Error {
#[snafu(backtrace)]
source: table::error::Error,
},
+
+ #[snafu(display("A generic error has occurred, msg: {}", msg))]
+ Generic { msg: String, location: Location },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -253,7 +256,7 @@ impl ErrorExt for Error {
| Error::EmptyValue { .. }
| Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
- Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,
+ Error::Generic { .. } | Error::SystemCatalogTypeMismatch { .. } => StatusCode::Internal,
Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source } => {
source.status_code()
diff --git a/src/catalog/src/lib.rs b/src/catalog/src/lib.rs
index ad8a1735ccad..09876a2dc005 100644
--- a/src/catalog/src/lib.rs
+++ b/src/catalog/src/lib.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#![feature(trait_upcasting)]
#![feature(assert_matches)]
use std::any::Any;
diff --git a/src/catalog/src/metrics.rs b/src/catalog/src/metrics.rs
index 759715020735..6e481c15e2e8 100644
--- a/src/catalog/src/metrics.rs
+++ b/src/catalog/src/metrics.rs
@@ -20,6 +20,9 @@ pub(crate) const METRIC_CATALOG_MANAGER_CATALOG_COUNT: &str = "catalog.catalog_c
pub(crate) const METRIC_CATALOG_MANAGER_SCHEMA_COUNT: &str = "catalog.schema_count";
pub(crate) const METRIC_CATALOG_MANAGER_TABLE_COUNT: &str = "catalog.table_count";
+pub(crate) const METRIC_CATALOG_KV_REMOTE_GET: &str = "catalog.kv.get.remote";
+pub(crate) const METRIC_CATALOG_KV_GET: &str = "catalog.kv.get";
+
#[inline]
pub(crate) fn db_label(catalog: &str, schema: &str) -> (&'static str, String) {
(METRIC_DB_LABEL, build_db_string(catalog, schema))
diff --git a/src/catalog/src/remote.rs b/src/catalog/src/remote.rs
index f66cc409635c..431cbbed349c 100644
--- a/src/catalog/src/remote.rs
+++ b/src/catalog/src/remote.rs
@@ -16,7 +16,7 @@ use std::fmt::Debug;
use std::pin::Pin;
use std::sync::Arc;
-pub use client::MetaKvBackend;
+pub use client::CachedMetaKvBackend;
use futures::Stream;
use futures_util::StreamExt;
pub use manager::{RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider};
@@ -74,6 +74,13 @@ pub trait KvBackend: Send + Sync {
pub type KvBackendRef = Arc<dyn KvBackend>;
+#[async_trait::async_trait]
+pub trait KvCacheInvalidator: Send + Sync {
+ async fn invalidate_key(&self, key: &[u8]);
+}
+
+pub type KvCacheInvalidatorRef = Arc<dyn KvCacheInvalidator>;
+
#[cfg(test)]
mod tests {
use async_stream::stream;
@@ -119,12 +126,16 @@ mod tests {
#[tokio::test]
async fn test_get() {
let backend = MockKvBackend {};
+
let result = backend.get(0.to_string().as_bytes()).await;
assert_eq!(0.to_string().as_bytes(), result.unwrap().unwrap().0);
+
let result = backend.get(1.to_string().as_bytes()).await;
assert_eq!(1.to_string().as_bytes(), result.unwrap().unwrap().0);
+
let result = backend.get(2.to_string().as_bytes()).await;
assert_eq!(2.to_string().as_bytes(), result.unwrap().unwrap().0);
+
let result = backend.get(3.to_string().as_bytes()).await;
assert!(result.unwrap().is_none());
}
diff --git a/src/catalog/src/remote/client.rs b/src/catalog/src/remote/client.rs
index b981a89bea1c..64ca217c06fd 100644
--- a/src/catalog/src/remote/client.rs
+++ b/src/catalog/src/remote/client.rs
@@ -14,15 +14,124 @@
use std::fmt::Debug;
use std::sync::Arc;
+use std::time::Duration;
use async_stream::stream;
use common_meta::rpc::store::{CompareAndPutRequest, DeleteRangeRequest, PutRequest, RangeRequest};
-use common_telemetry::info;
+use common_telemetry::{info, timer};
use meta_client::client::MetaClient;
+use moka::future::{Cache, CacheBuilder};
use snafu::ResultExt;
-use crate::error::{Error, MetaSrvSnafu};
-use crate::remote::{Kv, KvBackend, ValueIter};
+use super::KvCacheInvalidator;
+use crate::error::{Error, GenericSnafu, MetaSrvSnafu, Result};
+use crate::metrics::{METRIC_CATALOG_KV_GET, METRIC_CATALOG_KV_REMOTE_GET};
+use crate::remote::{Kv, KvBackend, KvBackendRef, ValueIter};
+
+const CACHE_MAX_CAPACITY: u64 = 10000;
+const CACHE_TTL_SECOND: u64 = 10 * 60;
+const CACHE_TTI_SECOND: u64 = 5 * 60;
+
+pub struct CachedMetaKvBackend {
+ kv_backend: KvBackendRef,
+ cache: Arc<Cache<Vec<u8>, Option<Kv>>>,
+}
+
+#[async_trait::async_trait]
+impl KvBackend for CachedMetaKvBackend {
+ fn range<'a, 'b>(&'a self, key: &[u8]) -> ValueIter<'b, Error>
+ where
+ 'a: 'b,
+ {
+ self.kv_backend.range(key)
+ }
+
+ async fn get(&self, key: &[u8]) -> Result<Option<Kv>> {
+ let _timer = timer!(METRIC_CATALOG_KV_GET);
+
+ let init = async {
+ let _timer = timer!(METRIC_CATALOG_KV_REMOTE_GET);
+
+ self.kv_backend.get(key).await
+ };
+
+ let schema_provider = self.cache.try_get_with_by_ref(key, init).await;
+ schema_provider.map_err(|e| GenericSnafu { msg: e.to_string() }.build())
+ }
+
+ async fn set(&self, key: &[u8], val: &[u8]) -> Result<()> {
+ let ret = self.kv_backend.set(key, val).await;
+
+ if ret.is_ok() {
+ self.invalidate_key(key).await;
+ }
+
+ ret
+ }
+
+ async fn delete(&self, key: &[u8]) -> Result<()> {
+ let ret = self.kv_backend.delete_range(key, &[]).await;
+
+ if ret.is_ok() {
+ self.invalidate_key(key).await;
+ }
+
+ ret
+ }
+
+ async fn delete_range(&self, _key: &[u8], _end: &[u8]) -> Result<()> {
+ // TODO(fys): implement it
+ unimplemented!()
+ }
+
+ async fn compare_and_set(
+ &self,
+ key: &[u8],
+ expect: &[u8],
+ val: &[u8],
+ ) -> Result<std::result::Result<(), Option<Vec<u8>>>> {
+ let ret = self.kv_backend.compare_and_set(key, expect, val).await;
+
+ if ret.is_ok() {
+ self.invalidate_key(key).await;
+ }
+
+ ret
+ }
+}
+
+#[async_trait::async_trait]
+impl KvCacheInvalidator for CachedMetaKvBackend {
+ async fn invalidate_key(&self, key: &[u8]) {
+ self.cache.invalidate(key).await
+ }
+}
+
+impl CachedMetaKvBackend {
+ pub fn new(client: Arc<MetaClient>) -> Self {
+ let cache = Arc::new(
+ CacheBuilder::new(CACHE_MAX_CAPACITY)
+ .time_to_live(Duration::from_secs(CACHE_TTL_SECOND))
+ .time_to_idle(Duration::from_secs(CACHE_TTI_SECOND))
+ .build(),
+ );
+ let kv_backend = Arc::new(MetaKvBackend { client });
+
+ Self { kv_backend, cache }
+ }
+
+ pub fn wrap(kv_backend: KvBackendRef) -> Self {
+ let cache = Arc::new(
+ CacheBuilder::new(CACHE_MAX_CAPACITY)
+ .time_to_live(Duration::from_secs(CACHE_TTL_SECOND))
+ .time_to_idle(Duration::from_secs(CACHE_TTI_SECOND))
+ .build(),
+ );
+
+ Self { kv_backend, cache }
+ }
+}
+
#[derive(Debug)]
pub struct MetaKvBackend {
pub client: Arc<MetaClient>,
@@ -51,7 +160,7 @@ impl KvBackend for MetaKvBackend {
}))
}
- async fn get(&self, key: &[u8]) -> Result<Option<Kv>, Error> {
+ async fn get(&self, key: &[u8]) -> Result<Option<Kv>> {
let mut response = self
.client
.range(RangeRequest::new().with_key(key))
@@ -63,7 +172,7 @@ impl KvBackend for MetaKvBackend {
.map(|kv| Kv(kv.take_key(), kv.take_value())))
}
- async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error> {
+ async fn set(&self, key: &[u8], val: &[u8]) -> Result<()> {
let req = PutRequest::new()
.with_key(key.to_vec())
.with_value(val.to_vec());
@@ -71,7 +180,7 @@ impl KvBackend for MetaKvBackend {
Ok(())
}
- async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error> {
+ async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<()> {
let req = DeleteRangeRequest::new().with_range(key.to_vec(), end.to_vec());
let resp = self.client.delete_range(req).await.context(MetaSrvSnafu)?;
info!(
@@ -89,7 +198,7 @@ impl KvBackend for MetaKvBackend {
key: &[u8],
expect: &[u8],
val: &[u8],
- ) -> Result<Result<(), Option<Vec<u8>>>, Error> {
+ ) -> Result<std::result::Result<(), Option<Vec<u8>>>> {
let request = CompareAndPutRequest::new()
.with_key(key.to_vec())
.with_expect(expect.to_vec())
diff --git a/src/catalog/tests/mock.rs b/src/catalog/tests/mock.rs
index b28094351afc..70937bb535d0 100644
--- a/src/catalog/tests/mock.rs
+++ b/src/catalog/tests/mock.rs
@@ -139,12 +139,16 @@ impl KvBackend for MockKvBackend {
}
async fn delete_range(&self, key: &[u8], end: &[u8]) -> Result<(), Error> {
- let start = key.to_vec();
- let end = end.to_vec();
- let range = start..end;
-
let mut map = self.map.write().await;
- map.retain(|k, _| !range.contains(k));
+ if end.is_empty() {
+ let _ = map.remove(key);
+ } else {
+ let start = key.to_vec();
+ let end = end.to_vec();
+ let range = start..end;
+
+ map.retain(|k, _| !range.contains(k));
+ }
Ok(())
}
}
diff --git a/src/catalog/tests/remote_catalog_tests.rs b/src/catalog/tests/remote_catalog_tests.rs
index 42bc41f7e0b9..f577844baeb8 100644
--- a/src/catalog/tests/remote_catalog_tests.rs
+++ b/src/catalog/tests/remote_catalog_tests.rs
@@ -24,7 +24,8 @@ mod tests {
use catalog::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use catalog::remote::{
- KvBackend, KvBackendRef, RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider,
+ CachedMetaKvBackend, KvBackend, KvBackendRef, RemoteCatalogManager, RemoteCatalogProvider,
+ RemoteSchemaProvider,
};
use catalog::{CatalogManager, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
@@ -76,6 +77,52 @@ mod tests {
);
}
+ #[tokio::test]
+ async fn test_cached_backend() {
+ common_telemetry::init_default_ut_logging();
+ let backend = CachedMetaKvBackend::wrap(Arc::new(MockKvBackend::default()));
+
+ let default_catalog_key = CatalogKey {
+ catalog_name: DEFAULT_CATALOG_NAME.to_string(),
+ }
+ .to_string();
+
+ backend
+ .set(
+ default_catalog_key.as_bytes(),
+ &CatalogValue {}.as_bytes().unwrap(),
+ )
+ .await
+ .unwrap();
+
+ let ret = backend.get(b"__c-greptime").await.unwrap();
+ assert!(ret.is_some());
+
+ let _ = backend
+ .compare_and_set(
+ b"__c-greptime",
+ &CatalogValue {}.as_bytes().unwrap(),
+ b"123",
+ )
+ .await
+ .unwrap();
+
+ let ret = backend.get(b"__c-greptime").await.unwrap();
+ assert!(ret.is_some());
+ assert_eq!(&b"123"[..], &(ret.as_ref().unwrap().1));
+
+ let _ = backend.set(b"__c-greptime", b"1234").await;
+
+ let ret = backend.get(b"__c-greptime").await.unwrap();
+ assert!(ret.is_some());
+ assert_eq!(&b"1234"[..], &(ret.as_ref().unwrap().1));
+
+ backend.delete(b"__c-greptime").await.unwrap();
+
+ let ret = backend.get(b"__c-greptime").await.unwrap();
+ assert!(ret.is_none());
+ }
+
async fn prepare_components(
node_id: u64,
) -> (
@@ -84,17 +131,22 @@ mod tests {
Arc<RemoteCatalogManager>,
TableEngineManagerRef,
) {
- let backend = Arc::new(MockKvBackend::default()) as KvBackendRef;
+ let cached_backend = Arc::new(CachedMetaKvBackend::wrap(
+ Arc::new(MockKvBackend::default()),
+ ));
+
let table_engine = Arc::new(MockTableEngine::default());
let engine_manager = Arc::new(MemoryTableEngineManager::alias(
MITO_ENGINE.to_string(),
table_engine.clone(),
));
+
let catalog_manager =
- RemoteCatalogManager::new(engine_manager.clone(), node_id, backend.clone());
+ RemoteCatalogManager::new(engine_manager.clone(), node_id, cached_backend.clone());
catalog_manager.start().await.unwrap();
+
(
- backend,
+ cached_backend,
table_engine,
Arc::new(catalog_manager),
engine_manager as Arc<_>,
diff --git a/src/cmd/src/cli/repl.rs b/src/cmd/src/cli/repl.rs
index 1ae27a84761f..514a36f51acd 100644
--- a/src/cmd/src/cli/repl.rs
+++ b/src/cmd/src/cli/repl.rs
@@ -16,7 +16,7 @@ use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
-use catalog::remote::MetaKvBackend;
+use catalog::remote::CachedMetaKvBackend;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::ErrorExt;
use common_query::Output;
@@ -253,9 +253,7 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
.context(StartMetaClientSnafu)?;
let meta_client = Arc::new(meta_client);
- let backend = Arc::new(MetaKvBackend {
- client: meta_client.clone(),
- });
+ let cached_meta_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));
let table_routes = Arc::new(TableRoutes::new(meta_client));
let partition_manager = Arc::new(PartitionRuleManager::new(table_routes));
@@ -263,7 +261,8 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
let datanode_clients = Arc::new(DatanodeClients::default());
let catalog_list = Arc::new(FrontendCatalogManager::new(
- backend,
+ cached_meta_backend.clone(),
+ cached_meta_backend,
partition_manager,
datanode_clients,
));
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 8ac6ae38b5b3..5903359a2766 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -18,7 +18,7 @@ use std::time::Duration;
use std::{fs, path};
use api::v1::meta::Role;
-use catalog::remote::MetaKvBackend;
+use catalog::remote::CachedMetaKvBackend;
use catalog::{CatalogManager, CatalogManagerRef, RegisterTableRequest};
use common_base::paths::{CLUSTER_DIR, WAL_DIR};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
@@ -186,12 +186,14 @@ impl Instance {
}
Mode::Distributed => {
+ let kv_backend = Arc::new(CachedMetaKvBackend::new(
+ meta_client.as_ref().unwrap().clone(),
+ ));
+
let catalog = Arc::new(catalog::remote::RemoteCatalogManager::new(
engine_manager.clone(),
opts.node_id.context(MissingNodeIdSnafu)?,
- Arc::new(MetaKvBackend {
- client: meta_client.as_ref().unwrap().clone(),
- }),
+ kv_backend,
));
(catalog as CatalogManagerRef, None)
}
diff --git a/src/frontend/src/catalog.rs b/src/frontend/src/catalog.rs
index e90c1832ed96..58a58d62a1c9 100644
--- a/src/frontend/src/catalog.rs
+++ b/src/frontend/src/catalog.rs
@@ -26,7 +26,7 @@ use catalog::helper::{
build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, SchemaKey,
TableGlobalKey, TableGlobalValue,
};
-use catalog::remote::{Kv, KvBackendRef};
+use catalog::remote::{Kv, KvBackendRef, KvCacheInvalidatorRef};
use catalog::{
CatalogManager, CatalogProvider, CatalogProviderRef, DeregisterTableRequest,
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
@@ -51,6 +51,7 @@ use crate::table::DistTable;
#[derive(Clone)]
pub struct FrontendCatalogManager {
backend: KvBackendRef,
+ backend_cache_invalidtor: KvCacheInvalidatorRef,
partition_manager: PartitionRuleManagerRef,
datanode_clients: Arc<DatanodeClients>,
@@ -64,11 +65,13 @@ pub struct FrontendCatalogManager {
impl FrontendCatalogManager {
pub fn new(
backend: KvBackendRef,
+ backend_cache_invalidtor: KvCacheInvalidatorRef,
partition_manager: PartitionRuleManagerRef,
datanode_clients: Arc<DatanodeClients>,
) -> Self {
Self {
backend,
+ backend_cache_invalidtor,
partition_manager,
datanode_clients,
dist_instance: None,
@@ -90,6 +93,19 @@ impl FrontendCatalogManager {
pub fn datanode_clients(&self) -> Arc<DatanodeClients> {
self.datanode_clients.clone()
}
+
+ pub async fn invalidate_table(&self, catalog: &str, schema: &str, table: &str) {
+ let tg_key = TableGlobalKey {
+ catalog_name: catalog.into(),
+ schema_name: schema.into(),
+ table_name: table.into(),
+ }
+ .to_string();
+
+ let tg_key = tg_key.as_bytes();
+
+ self.backend_cache_invalidtor.invalidate_key(tg_key).await;
+ }
}
// FIXME(hl): Frontend only needs a CatalogList, should replace with trait upcasting
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 741f6ed98b35..a550ac57e102 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -30,7 +30,7 @@ use api::v1::greptime_request::Request;
use api::v1::meta::Role;
use api::v1::{AddColumns, AlterExpr, Column, DdlRequest, InsertRequest};
use async_trait::async_trait;
-use catalog::remote::MetaKvBackend;
+use catalog::remote::CachedMetaKvBackend;
use catalog::CatalogManagerRef;
use common_base::Plugins;
use common_catalog::consts::MITO_ENGINE;
@@ -137,14 +137,16 @@ impl Instance {
datanode_clients: Arc<DatanodeClients>,
plugins: Arc<Plugins>,
) -> Result<Self> {
- let meta_backend = Arc::new(MetaKvBackend {
- client: meta_client.clone(),
- });
+ let meta_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));
let table_routes = Arc::new(TableRoutes::new(meta_client.clone()));
let partition_manager = Arc::new(PartitionRuleManager::new(table_routes));
- let mut catalog_manager =
- FrontendCatalogManager::new(meta_backend, partition_manager, datanode_clients.clone());
+ let mut catalog_manager = FrontendCatalogManager::new(
+ meta_backend.clone(),
+ meta_backend,
+ partition_manager,
+ datanode_clients.clone(),
+ );
let dist_instance = DistInstance::new(
meta_client.clone(),
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index c9e6512da48f..fbeecdb5b467 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -202,6 +202,19 @@ impl DistInstance {
.await
.context(RequestDatanodeSnafu)?;
}
+
+        // The table information created on meta does not go through KvBackend, so we
+ // manually invalidate the cache here.
+ //
+ // TODO(fys): when the meta invalidation cache mechanism is established, remove it.
+ self.catalog_manager
+ .invalidate_table(
+ &table_name.catalog_name,
+ &table_name.schema_name,
+ &table_name.table_name,
+ )
+ .await;
+
Ok(table)
}
@@ -260,6 +273,18 @@ impl DistInstance {
}
}
+        // The table information dropped on meta does not go through KvBackend, so we
+ // manually invalidate the cache here.
+ //
+ // TODO(fys): when the meta invalidation cache mechanism is established, remove it.
+ self.catalog_manager()
+ .invalidate_table(
+ &table_name.catalog_name,
+ &table_name.schema_name,
+ &table_name.table_name,
+ )
+ .await;
+
Ok(Output::AffectedRows(1))
}
@@ -470,12 +495,15 @@ impl DistInstance {
} else {
expr.catalog_name.as_str()
};
+
let schema_name = if expr.schema_name.is_empty() {
DEFAULT_SCHEMA_NAME
} else {
expr.schema_name.as_str()
};
+
let table_name = expr.table_name.as_str();
+
let table = self
.catalog_manager
.table(catalog_name, schema_name, table_name)
@@ -489,6 +517,7 @@ impl DistInstance {
.context(AlterExprToRequestSnafu)?;
let mut context = AlterContext::with_capacity(1);
+
context.insert(expr);
table.alter(context, &request).await.context(TableSnafu)?;
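The CachedMetaKvBackend above is a read-through cache in front of the meta kv store: get() loads through moka's try_get_with_by_ref, while set/delete/compare_and_set invalidate the touched key after a successful write, which is also why DistInstance invalidates table keys manually for DDL that bypasses the KvBackend. The standalone sketch below only illustrates that read-through-plus-invalidation pattern; it uses get_with for brevity where the commit uses try_get_with_by_ref to propagate loader errors, and the key, the fake loader value, and the tokio runtime are assumptions for the example, not part of the commit.

use std::time::Duration;

use moka::future::CacheBuilder;

#[tokio::main]
async fn main() {
    // Same sizing knobs as CACHE_MAX_CAPACITY / CACHE_TTL_SECOND / CACHE_TTI_SECOND above.
    let cache: moka::future::Cache<Vec<u8>, Vec<u8>> = CacheBuilder::new(10_000)
        .time_to_live(Duration::from_secs(10 * 60))
        .time_to_idle(Duration::from_secs(5 * 60))
        .build();

    let key = b"__c-greptime".to_vec();

    // On a miss the loader future runs (it stands in for kv_backend.get());
    // on a hit the cached value is returned and the loader is skipped.
    let value = cache
        .get_with(key.clone(), async { b"remote-value".to_vec() })
        .await;
    assert_eq!(value, b"remote-value".to_vec());

    // After a successful write the entry must be dropped so the next read
    // goes back to the remote store instead of serving a stale value.
    cache.invalidate(&key).await;
}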
|
feat
|
add cache for catalog kv backend (#1592)
|
a9e5b902fd7937d16a7768b5b903d511747725f4
|
2023-11-14 20:35:53
|
taobo
|
test: move sqlness env show test to common dir (#2748)
| false
|
diff --git a/tests/cases/distributed/show/show_create.result b/tests/cases/standalone/common/show/show_create.result
similarity index 100%
rename from tests/cases/distributed/show/show_create.result
rename to tests/cases/standalone/common/show/show_create.result
diff --git a/tests/cases/distributed/show/show_create.sql b/tests/cases/standalone/common/show/show_create.sql
similarity index 100%
rename from tests/cases/distributed/show/show_create.sql
rename to tests/cases/standalone/common/show/show_create.sql
diff --git a/tests/cases/standalone/show/show_create.result b/tests/cases/standalone/show/show_create.result
deleted file mode 100644
index e9e22f060cf3..000000000000
--- a/tests/cases/standalone/show/show_create.result
+++ /dev/null
@@ -1,63 +0,0 @@
-CREATE TABLE system_metrics (
- id INT UNSIGNED NULL,
- host STRING NULL,
- cpu DOUBLE NULL COMMENT 'cpu',
- disk FLOAT NULL,
- ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
- TIME INDEX (ts),
- PRIMARY KEY (id, host)
-)
-ENGINE=mito
-WITH(
- ttl = '7d',
- write_buffer_size = 1024
-);
-
-Affected Rows: 0
-
-SHOW CREATE TABLE system_metrics;
-
-+----------------+-----------------------------------------------------------+
-| Table | Create Table |
-+----------------+-----------------------------------------------------------+
-| system_metrics | CREATE TABLE IF NOT EXISTS "system_metrics" ( |
-| | "id" INT UNSIGNED NULL, |
-| | "host" STRING NULL, |
-| | "cpu" DOUBLE NULL COMMENT 'cpu', |
-| | "disk" FLOAT NULL, |
-| | "ts" TIMESTAMP(3) NOT NULL DEFAULT current_timestamp(), |
-| | TIME INDEX ("ts"), |
-| | PRIMARY KEY ("id", "host") |
-| | ) |
-| | |
-| | ENGINE=mito |
-| | WITH( |
-| | regions = 1, |
-| | ttl = '7days', |
-| | write_buffer_size = '1.0KiB' |
-| | ) |
-+----------------+-----------------------------------------------------------+
-
-DROP TABLE system_metrics;
-
-Affected Rows: 0
-
-CREATE TABLE not_supported_table_options_keys (
- id INT UNSIGNED,
- host STRING,
- cpu DOUBLE,
- disk FLOAT,
- n INT COMMENT 'range key',
- ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
- TIME INDEX (ts),
- PRIMARY KEY (id, host)
-)
-ENGINE=mito
-WITH(
- foo = 123,
- ttl = '7d',
- write_buffer_size = 1024
-);
-
-Error: 1004(InvalidArguments), Invalid table option key: foo
-
diff --git a/tests/cases/standalone/show/show_create.sql b/tests/cases/standalone/show/show_create.sql
deleted file mode 100644
index 86faf1a604aa..000000000000
--- a/tests/cases/standalone/show/show_create.sql
+++ /dev/null
@@ -1,35 +0,0 @@
-CREATE TABLE system_metrics (
- id INT UNSIGNED NULL,
- host STRING NULL,
- cpu DOUBLE NULL COMMENT 'cpu',
- disk FLOAT NULL,
- ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
- TIME INDEX (ts),
- PRIMARY KEY (id, host)
-)
-ENGINE=mito
-WITH(
- ttl = '7d',
- write_buffer_size = 1024
-);
-
-SHOW CREATE TABLE system_metrics;
-
-DROP TABLE system_metrics;
-
-CREATE TABLE not_supported_table_options_keys (
- id INT UNSIGNED,
- host STRING,
- cpu DOUBLE,
- disk FLOAT,
- n INT COMMENT 'range key',
- ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
- TIME INDEX (ts),
- PRIMARY KEY (id, host)
-)
-ENGINE=mito
-WITH(
- foo = 123,
- ttl = '7d',
- write_buffer_size = 1024
-);
|
test
|
move sqlness env show test to common dir (#2748)
|
6a05f617a40ae28fb8b1d22c42c40d0ce7b17ac6
|
2023-04-01 14:46:51
|
Eugene Tolbakov
|
feat(stddev_over_time): add initial implementation (#1289)
| false
|
diff --git a/src/promql/src/functions.rs b/src/promql/src/functions.rs
index ceba5c6181c7..f83c4e5d071c 100644
--- a/src/promql/src/functions.rs
+++ b/src/promql/src/functions.rs
@@ -23,7 +23,7 @@ mod test_util;
pub use aggr_over_time::{
AbsentOverTime, AvgOverTime, CountOverTime, LastOverTime, MaxOverTime, MinOverTime,
- PresentOverTime, SumOverTime,
+ PresentOverTime, StddevOverTime, SumOverTime,
};
use datafusion::arrow::array::ArrayRef;
use datafusion::error::DataFusionError;
@@ -40,3 +40,19 @@ pub(crate) fn extract_array(columnar_value: &ColumnarValue) -> Result<ArrayRef,
))
}
}
+
+/// Compensated (Kahan) summation: a technique for reducing the numerical error
+/// in floating-point arithmetic. The algorithm also includes the modification ("Neumaier improvement")
+/// that reduces the numerical error further in cases
+/// where the numbers being summed have a large difference in magnitude.
+/// Prometheus's implementation:
+/// https://github.com/prometheus/prometheus/blob/f55ab2217984770aa1eecd0f2d5f54580029b1c0/promql/functions.go#L782
+pub(crate) fn compensated_sum_inc(inc: f64, sum: f64, mut compensation: f64) -> (f64, f64) {
+ let new_sum = sum + inc;
+ if sum.abs() >= inc.abs() {
+ compensation += (sum - new_sum) + inc;
+ } else {
+ compensation += (inc - new_sum) + sum;
+ }
+ (new_sum, compensation)
+}
diff --git a/src/promql/src/functions/aggr_over_time.rs b/src/promql/src/functions/aggr_over_time.rs
index 451008d42084..be164a60f0cc 100644
--- a/src/promql/src/functions/aggr_over_time.rs
+++ b/src/promql/src/functions/aggr_over_time.rs
@@ -24,7 +24,7 @@ use datatypes::arrow::array::Array;
use datatypes::arrow::compute;
use datatypes::arrow::datatypes::DataType;
-use crate::functions::extract_array;
+use crate::functions::{compensated_sum_inc, extract_array};
use crate::range_array::RangeArray;
/// The average value of all points in the specified interval.
@@ -117,7 +117,42 @@ pub fn present_over_time(_: &TimestampMillisecondArray, values: &Float64Array) -
}
}
-// TODO(ruihang): support quantile_over_time, stddev_over_time, and stdvar_over_time
+// TODO(ruihang): support quantile_over_time, and stdvar_over_time
+
+/// the population standard deviation of the values in the specified interval.
+/// Prometheus's implementation: https://github.com/prometheus/prometheus/blob/f55ab2217984770aa1eecd0f2d5f54580029b1c0/promql/functions.go#L556-L569
+#[range_fn(
+ name = "StddevOverTime",
+ ret = "Float64Array",
+ display_name = "prom_stddev_over_time"
+)]
+pub fn stddev_over_time(_: &TimestampMillisecondArray, values: &Float64Array) -> Option<f64> {
+ if values.is_empty() {
+ None
+ } else {
+ let mut count = 0.0;
+ let mut mean = 0.0;
+ let mut comp_mean = 0.0;
+ let mut deviations_sum_sq = 0.0;
+ let mut comp_deviations_sum_sq = 0.0;
+ for v in values {
+ count += 1.0;
+ let current_value = v.unwrap();
+ let delta = current_value - (mean + comp_mean);
+ let (new_mean, new_comp_mean) = compensated_sum_inc(delta / count, mean, comp_mean);
+ mean = new_mean;
+ comp_mean = new_comp_mean;
+ let (new_deviations_sum_sq, new_comp_deviations_sum_sq) = compensated_sum_inc(
+ delta * (current_value - (mean + comp_mean)),
+ deviations_sum_sq,
+ comp_deviations_sum_sq,
+ );
+ deviations_sum_sq = new_deviations_sum_sq;
+ comp_deviations_sum_sq = new_comp_deviations_sum_sq;
+ }
+ Some(((deviations_sum_sq + comp_deviations_sum_sq) / count).sqrt())
+ }
+}
#[cfg(test)]
mod test {
@@ -332,4 +367,50 @@ mod test {
],
);
}
+
+ #[test]
+ fn calculate_std_dev_over_time() {
+ let (ts_array, value_array) = build_test_range_arrays();
+ simple_range_udf_runner(
+ StddevOverTime::scalar_udf(),
+ ts_array,
+ value_array,
+ vec![
+ Some(37.6543215),
+ Some(28.442923895289123),
+ Some(0.0),
+ None,
+ None,
+ Some(18.12081352042062),
+ Some(11.983172291869804),
+ Some(11.441953741554055),
+ Some(0.0),
+ None,
+ ],
+ );
+
+ // add more assertions
+ let ts_array = Arc::new(TimestampMillisecondArray::from_iter(
+ [1000i64, 3000, 5000, 7000, 9000, 11000, 13000, 15000]
+ .into_iter()
+ .map(Some),
+ ));
+ let values_array = Arc::new(Float64Array::from_iter([
+ 1.5990505637277868,
+ 1.5990505637277868,
+ 1.5990505637277868,
+ 0.0,
+ 8.0,
+ 8.0,
+ 2.0,
+ 3.0,
+ ]));
+ let ranges = [(0, 3), (3, 5)];
+ simple_range_udf_runner(
+ StddevOverTime::scalar_udf(),
+ RangeArray::from_ranges(ts_array, ranges).unwrap(),
+ RangeArray::from_ranges(values_array, ranges).unwrap(),
+ vec![Some(0.0), Some(3.249615361854384)],
+ );
+ }
}
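The compensated_sum_inc helper added here is the Neumaier variant of Kahan summation described in its doc comment: a separate correction term accumulates the low-order bits that plain f64 addition discards, and stddev_over_time folds that correction into both the running mean and the squared-deviation sum. The self-contained sketch below reuses the helper body from the diff on inputs with a large difference in magnitude; the sample values are illustrative only.

// Copied from the diff above (Neumaier-improved Kahan summation).
fn compensated_sum_inc(inc: f64, sum: f64, mut compensation: f64) -> (f64, f64) {
    let new_sum = sum + inc;
    if sum.abs() >= inc.abs() {
        compensation += (sum - new_sum) + inc;
    } else {
        compensation += (inc - new_sum) + sum;
    }
    (new_sum, compensation)
}

fn main() {
    // 1.0 is smaller than one ulp of 1e16, so naive summation loses it entirely.
    let values = [1e16, 1.0, -1e16];
    let naive: f64 = values.iter().sum();

    let (mut sum, mut comp) = (0.0_f64, 0.0_f64);
    for v in values {
        (sum, comp) = compensated_sum_inc(v, sum, comp);
    }

    // Prints: naive = 0, compensated = 1
    println!("naive = {naive}, compensated = {}", sum + comp);
}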
|
feat
|
add initial implementation (#1289)
|
ccb1978c98b6c5e687af7b8530c053bebae3ae7f
|
2025-02-10 08:58:34
|
yihong
|
fix: close issue #5466 by do not shortcut the drop command (#5467)
| false
|
diff --git a/src/datanode/src/region_server.rs b/src/datanode/src/region_server.rs
index 43baf32ec323..f1eb63c51b03 100644
--- a/src/datanode/src/region_server.rs
+++ b/src/datanode/src/region_server.rs
@@ -661,7 +661,7 @@ impl RegionServerInner {
}
}
Err(e) => {
- self.unset_region_status(region_id, *region_change);
+ self.unset_region_status(region_id, &engine, *region_change);
error!(e; "Failed to open region: {}", region_id);
errors.push(e);
}
@@ -670,7 +670,7 @@ impl RegionServerInner {
}
Err(e) => {
                for (®ion_id, region_change) in ®ion_changes {
- self.unset_region_status(region_id, *region_change);
+ self.unset_region_status(region_id, &engine, *region_change);
}
error!(e; "Failed to open batch regions");
errors.push(BoxedError::new(e));
@@ -780,7 +780,7 @@ impl RegionServerInner {
}
Err(err) => {
// Removes the region status if the operation fails.
- self.unset_region_status(region_id, region_change);
+ self.unset_region_status(region_id, &engine, region_change);
Err(err)
}
}
@@ -809,12 +809,21 @@ impl RegionServerInner {
}
}
- fn unset_region_status(&self, region_id: RegionId, region_change: RegionChange) {
+ fn unset_region_status(
+ &self,
+ region_id: RegionId,
+ engine: &RegionEngineRef,
+ region_change: RegionChange,
+ ) {
match region_change {
RegionChange::None => {}
- RegionChange::Register(_) | RegionChange::Deregisters => {
+ RegionChange::Register(_) => {
                self.region_map.remove(®ion_id);
}
+ RegionChange::Deregisters => {
+ self.region_map
+ .insert(region_id, RegionEngineWithStatus::Ready(engine.clone()));
+ }
RegionChange::Catchup => {}
}
}
@@ -1195,7 +1204,7 @@ mod tests {
.unwrap_err();
        let status = mock_region_server.inner.region_map.get(®ion_id);
- assert!(status.is_none());
+ assert!(status.is_some());
}
struct CurrentEngineTest {
|
fix
|
close issue #5466 by do not shortcut the drop command (#5467)
|
3b701d8f5e97cd4a4e1a7a44d19633c211fce740
|
2024-08-04 13:59:31
|
shuiyisong
|
test: more on processors (#4493)
| false
|
diff --git a/src/pipeline/tests/date.rs b/src/pipeline/tests/date.rs
new file mode 100644
index 000000000000..775f0688c146
--- /dev/null
+++ b/src/pipeline/tests/date.rs
@@ -0,0 +1,138 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod common;
+
+use api::v1::ColumnSchema;
+use greptime_proto::v1::value::ValueData;
+use greptime_proto::v1::{ColumnDataType, SemanticType};
+use lazy_static::lazy_static;
+
+const TEST_INPUT: &str = r#"
+{
+ "input_str": "2024-06-27T06:13:36.991Z"
+}"#;
+
+const TEST_VALUE: Option<ValueData> =
+ Some(ValueData::TimestampNanosecondValue(1719468816991000000));
+
+lazy_static! {
+ static ref EXPECTED_SCHEMA: Vec<ColumnSchema> = vec![
+ common::make_column_schema(
+ "ts".to_string(),
+ ColumnDataType::TimestampNanosecond,
+ SemanticType::Field,
+ ),
+ common::make_column_schema(
+ "greptime_timestamp".to_string(),
+ ColumnDataType::TimestampNanosecond,
+ SemanticType::Timestamp,
+ ),
+ ];
+}
+
+#[test]
+fn test_parse_date() {
+ let pipeline_yaml = r#"
+processors:
+ - date:
+ fields:
+ - input_str
+ formats:
+ - "%Y-%m-%dT%H:%M:%S%.3fZ"
+
+transform:
+ - fields:
+ - input_str, ts
+ type: time
+"#;
+
+ let output = common::parse_and_exec(TEST_INPUT, pipeline_yaml);
+ assert_eq!(output.schema, *EXPECTED_SCHEMA);
+ assert_eq!(output.rows[0].values[0].value_data, TEST_VALUE);
+}
+
+#[test]
+fn test_multi_formats() {
+ let pipeline_yaml = r#"
+processors:
+ - date:
+ fields:
+ - input_str
+ formats:
+ - "%Y-%m-%dT%H:%M:%S"
+ - "%Y-%m-%dT%H:%M:%S%.3fZ"
+
+transform:
+ - fields:
+ - input_str, ts
+ type: time
+"#;
+
+ let output = common::parse_and_exec(TEST_INPUT, pipeline_yaml);
+ assert_eq!(output.schema, *EXPECTED_SCHEMA);
+ assert_eq!(output.rows[0].values[0].value_data, TEST_VALUE);
+}
+
+#[test]
+fn test_ignore_missing() {
+ let empty_input = r#"{}"#;
+
+ let pipeline_yaml = r#"
+processors:
+ - date:
+ fields:
+ - input_str
+ formats:
+ - "%Y-%m-%dT%H:%M:%S"
+ - "%Y-%m-%dT%H:%M:%S%.3fZ"
+ ignore_missing: true
+
+transform:
+ - fields:
+ - input_str, ts
+ type: time
+"#;
+
+ let output = common::parse_and_exec(empty_input, pipeline_yaml);
+ assert_eq!(output.schema, *EXPECTED_SCHEMA);
+ assert_eq!(output.rows[0].values[0].value_data, None);
+}
+
+#[test]
+fn test_timezone() {
+ let pipeline_yaml = r#"
+processors:
+ - date:
+ fields:
+ - input_str
+ formats:
+ - "%Y-%m-%dT%H:%M:%S"
+ - "%Y-%m-%dT%H:%M:%S%.3fZ"
+ ignore_missing: true
+ timezone: 'Asia/Shanghai'
+
+transform:
+ - fields:
+ - input_str, ts
+ type: time
+"#;
+
+ let output = common::parse_and_exec(TEST_INPUT, pipeline_yaml);
+ assert_eq!(output.schema, *EXPECTED_SCHEMA);
+ assert_eq!(
+ output.rows[0].values[0].value_data,
+ Some(ValueData::TimestampNanosecondValue(1719440016991000000))
+ );
+}
diff --git a/src/pipeline/tests/dissect.rs b/src/pipeline/tests/dissect.rs
index bc9ca263ca40..10f9e2799616 100644
--- a/src/pipeline/tests/dissect.rs
+++ b/src/pipeline/tests/dissect.rs
@@ -17,6 +17,10 @@ mod common;
use greptime_proto::v1::value::ValueData::StringValue;
use greptime_proto::v1::{ColumnDataType, SemanticType};
+fn make_string_column_schema(name: String) -> greptime_proto::v1::ColumnSchema {
+ common::make_column_schema(name, ColumnDataType::String, SemanticType::Field)
+}
+
#[test]
fn test_dissect_pattern() {
let input_value_str = r#"
@@ -43,8 +47,8 @@ transform:
let output = common::parse_and_exec(input_value_str, pipeline_yaml);
let expected_schema = vec![
- common::make_column_schema("a".to_string(), ColumnDataType::String, SemanticType::Field),
- common::make_column_schema("b".to_string(), ColumnDataType::String, SemanticType::Field),
+ make_string_column_schema("a".to_string()),
+ make_string_column_schema("b".to_string()),
common::make_column_schema(
"greptime_timestamp".to_string(),
ColumnDataType::TimestampNanosecond,
@@ -91,8 +95,8 @@ transform:
let output = common::parse_and_exec(input_value_str, pipeline_yaml);
let expected_schema = vec![
- common::make_column_schema("a".to_string(), ColumnDataType::String, SemanticType::Field),
- common::make_column_schema("b".to_string(), ColumnDataType::String, SemanticType::Field),
+ make_string_column_schema("a".to_string()),
+ make_string_column_schema("b".to_string()),
common::make_column_schema(
"greptime_timestamp".to_string(),
ColumnDataType::TimestampNanosecond,
@@ -111,3 +115,141 @@ transform:
Some(StringValue("456".to_string()))
);
}
+
+#[test]
+fn test_ignore_missing() {
+ let empty_str = r#"{}"#;
+
+ let pipeline_yaml = r#"
+processors:
+ - dissect:
+ field: str
+ patterns:
+ - "%{a} %{b}"
+ ignore_missing: true
+
+transform:
+ - fields:
+ - a
+ - b
+ type: string
+"#;
+
+ let output = common::parse_and_exec(empty_str, pipeline_yaml);
+
+ let expected_schema = vec![
+ make_string_column_schema("a".to_string()),
+ make_string_column_schema("b".to_string()),
+ common::make_column_schema(
+ "greptime_timestamp".to_string(),
+ ColumnDataType::TimestampNanosecond,
+ SemanticType::Timestamp,
+ ),
+ ];
+
+ assert_eq!(output.schema, expected_schema);
+
+ assert_eq!(output.rows[0].values[0].value_data, None);
+ assert_eq!(output.rows[0].values[1].value_data, None);
+}
+
+#[test]
+fn test_modifier() {
+ let empty_str = r#"
+{
+ "str": "key1 key2 key3 key4 key5 key6 key7 key8"
+}"#;
+
+ let pipeline_yaml = r#"
+processors:
+ - dissect:
+ field: str
+ patterns:
+ - "%{key1} %{key2} %{+key3} %{+key3/2} %{key5->} %{?key6} %{*key_7} %{&key_7}"
+
+transform:
+ - fields:
+ - key1
+ - key2
+ - key3
+ - key5
+ - key7
+ type: string
+"#;
+
+ let output = common::parse_and_exec(empty_str, pipeline_yaml);
+
+ let expected_schema = vec![
+ make_string_column_schema("key1".to_string()),
+ make_string_column_schema("key2".to_string()),
+ make_string_column_schema("key3".to_string()),
+ make_string_column_schema("key5".to_string()),
+ make_string_column_schema("key7".to_string()),
+ common::make_column_schema(
+ "greptime_timestamp".to_string(),
+ ColumnDataType::TimestampNanosecond,
+ SemanticType::Timestamp,
+ ),
+ ];
+
+ assert_eq!(output.schema, expected_schema);
+ assert_eq!(
+ output.rows[0].values[0].value_data,
+ Some(StringValue("key1".to_string()))
+ );
+ assert_eq!(
+ output.rows[0].values[1].value_data,
+ Some(StringValue("key2".to_string()))
+ );
+ assert_eq!(
+ output.rows[0].values[2].value_data,
+ Some(StringValue("key3 key4".to_string()))
+ );
+ assert_eq!(
+ output.rows[0].values[3].value_data,
+ Some(StringValue("key5".to_string()))
+ );
+ assert_eq!(
+ output.rows[0].values[4].value_data,
+ Some(StringValue("key8".to_string()))
+ );
+}
+
+#[test]
+fn test_append_separator() {
+ let empty_str = r#"
+{
+ "str": "key1 key2"
+}"#;
+
+ let pipeline_yaml = r#"
+processors:
+ - dissect:
+ field: str
+ patterns:
+ - "%{+key1} %{+key1}"
+ append_separator: "_"
+
+transform:
+ - fields:
+ - key1
+ type: string
+"#;
+
+ let output = common::parse_and_exec(empty_str, pipeline_yaml);
+
+ let expected_schema = vec![
+ make_string_column_schema("key1".to_string()),
+ common::make_column_schema(
+ "greptime_timestamp".to_string(),
+ ColumnDataType::TimestampNanosecond,
+ SemanticType::Timestamp,
+ ),
+ ];
+
+ assert_eq!(output.schema, expected_schema);
+ assert_eq!(
+ output.rows[0].values[0].value_data,
+ Some(StringValue("key1_key2".to_string()))
+ );
+}
diff --git a/src/pipeline/tests/epoch.rs b/src/pipeline/tests/epoch.rs
new file mode 100644
index 000000000000..35a2ab635c00
--- /dev/null
+++ b/src/pipeline/tests/epoch.rs
@@ -0,0 +1,255 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod common;
+
+use api::v1::ColumnSchema;
+use greptime_proto::v1::value::ValueData;
+use greptime_proto::v1::{ColumnDataType, SemanticType};
+
+#[test]
+fn test_parse_epoch() {
+ let test_input = r#"
+ {
+ "input_s": "1722580862",
+ "input_sec": "1722580862",
+ "input_second": "1722580862",
+ "input_ms": "1722580887794",
+ "input_millisecond": "1722580887794",
+ "input_milli": "1722580887794",
+ "input_default": "1722580887794",
+ "input_us": "1722580905423969",
+ "input_microsecond": "1722580905423969",
+ "input_micro": "1722580905423969",
+ "input_ns": "1722580929863842048",
+ "input_nanosecond": "1722580929863842048",
+ "input_nano": "1722580929863842048"
+ }"#;
+
+ let pipeline_yaml = r#"
+processors:
+ - epoch:
+ field: input_s
+ resolution: s
+ - epoch:
+ field: input_sec
+ resolution: sec
+ - epoch:
+ field: input_second
+ resolution: second
+ - epoch:
+ field: input_ms
+ resolution: ms
+ - epoch:
+ field: input_millisecond
+ resolution: millisecond
+ - epoch:
+ field: input_milli
+ resolution: milli
+ - epoch:
+ field: input_default
+ - epoch:
+ field: input_us
+ resolution: us
+ - epoch:
+ field: input_microsecond
+ resolution: microsecond
+ - epoch:
+ field: input_micro
+ resolution: micro
+ - epoch:
+ field: input_ns
+ resolution: ns
+ - epoch:
+ field: input_nanosecond
+ resolution: nanosecond
+ - epoch:
+ field: input_nano
+ resolution: nano
+
+transform:
+ - field: input_s
+ type: epoch, s
+ - field: input_sec
+ type: epoch, sec
+ - field: input_second
+ type: epoch, second
+
+ - field: input_ms
+ type: epoch, ms
+ - field: input_millisecond
+ type: epoch, millisecond
+ - field: input_milli
+ type: epoch, milli
+ - field: input_default
+ type: epoch, milli
+
+ - field: input_us
+ type: epoch, us
+ - field: input_microsecond
+ type: epoch, microsecond
+ - field: input_micro
+ type: epoch, micro
+
+ - field: input_ns
+ type: epoch, ns
+ - field: input_nanosecond
+ type: epoch, nanosecond
+ - field: input_nano
+ type: epoch, nano
+"#;
+ fn make_time_field(name: &str, datatype: ColumnDataType) -> ColumnSchema {
+ common::make_column_schema(name.to_string(), datatype, SemanticType::Field)
+ }
+
+ let expected_schema = vec![
+ make_time_field("input_s", ColumnDataType::TimestampSecond),
+ make_time_field("input_sec", ColumnDataType::TimestampSecond),
+ make_time_field("input_second", ColumnDataType::TimestampSecond),
+ make_time_field("input_ms", ColumnDataType::TimestampMillisecond),
+ make_time_field("input_millisecond", ColumnDataType::TimestampMillisecond),
+ make_time_field("input_milli", ColumnDataType::TimestampMillisecond),
+ make_time_field("input_default", ColumnDataType::TimestampMillisecond),
+ make_time_field("input_us", ColumnDataType::TimestampMicrosecond),
+ make_time_field("input_microsecond", ColumnDataType::TimestampMicrosecond),
+ make_time_field("input_micro", ColumnDataType::TimestampMicrosecond),
+ make_time_field("input_ns", ColumnDataType::TimestampNanosecond),
+ make_time_field("input_nanosecond", ColumnDataType::TimestampNanosecond),
+ make_time_field("input_nano", ColumnDataType::TimestampNanosecond),
+ common::make_column_schema(
+ "greptime_timestamp".to_string(),
+ ColumnDataType::TimestampNanosecond,
+ SemanticType::Timestamp,
+ ),
+ ];
+
+ let output = common::parse_and_exec(test_input, pipeline_yaml);
+ assert_eq!(output.schema, expected_schema);
+
+ for i in 0..2 {
+ assert_eq!(
+ output.rows[0].values[i].value_data,
+ Some(ValueData::TimestampSecondValue(1722580862))
+ );
+ }
+ for i in 3..6 {
+ assert_eq!(
+ output.rows[0].values[i].value_data,
+ Some(ValueData::TimestampMillisecondValue(1722580887794))
+ );
+ }
+ for i in 7..9 {
+ assert_eq!(
+ output.rows[0].values[i].value_data,
+ Some(ValueData::TimestampMicrosecondValue(1722580905423969))
+ );
+ }
+ for i in 10..12 {
+ assert_eq!(
+ output.rows[0].values[i].value_data,
+ Some(ValueData::TimestampNanosecondValue(1722580929863842048))
+ );
+ }
+}
+
+#[test]
+fn test_ignore_missing() {
+ let empty_input = r#"{}"#;
+
+ let pipeline_yaml = r#"
+processors:
+ - epoch:
+ field: input_s
+ resolution: s
+ ignore_missing: true
+
+transform:
+ - fields:
+ - input_s, ts
+ type: epoch, s
+"#;
+
+ let expected_schema = vec![
+ common::make_column_schema(
+ "ts".to_string(),
+ ColumnDataType::TimestampSecond,
+ SemanticType::Field,
+ ),
+ common::make_column_schema(
+ "greptime_timestamp".to_string(),
+ ColumnDataType::TimestampNanosecond,
+ SemanticType::Timestamp,
+ ),
+ ];
+
+ let output = common::parse_and_exec(empty_input, pipeline_yaml);
+ assert_eq!(output.schema, expected_schema);
+ assert_eq!(output.rows[0].values[0].value_data, None);
+}
+
+#[test]
+fn test_default_wrong_resolution() {
+ let test_input = r#"
+ {
+ "input_s": "1722580862",
+ "input_nano": "1722583122284583936"
+ }"#;
+
+ let pipeline_yaml = r#"
+processors:
+ - epoch:
+ fields:
+ - input_s
+ - input_nano
+
+transform:
+ - fields:
+ - input_s
+ type: epoch, s
+ - fields:
+ - input_nano
+ type: epoch, nano
+"#;
+
+ let expected_schema = vec![
+ common::make_column_schema(
+ "input_s".to_string(),
+ ColumnDataType::TimestampSecond,
+ SemanticType::Field,
+ ),
+ common::make_column_schema(
+ "input_nano".to_string(),
+ ColumnDataType::TimestampNanosecond,
+ SemanticType::Field,
+ ),
+ common::make_column_schema(
+ "greptime_timestamp".to_string(),
+ ColumnDataType::TimestampNanosecond,
+ SemanticType::Timestamp,
+ ),
+ ];
+
+ let output = common::parse_and_exec(test_input, pipeline_yaml);
+ assert_eq!(output.schema, expected_schema);
+ // this is actually wrong
+ // TODO(shuiyisong): add check for type when converting epoch
+ assert_eq!(
+ output.rows[0].values[0].value_data,
+ Some(ValueData::TimestampMillisecondValue(1722580862))
+ );
+ assert_eq!(
+ output.rows[0].values[1].value_data,
+ Some(ValueData::TimestampMillisecondValue(1722583122284583936))
+ );
+}
diff --git a/src/pipeline/tests/gsub.rs b/src/pipeline/tests/gsub.rs
index 2f336923e8b6..b7044b9a1834 100644
--- a/src/pipeline/tests/gsub.rs
+++ b/src/pipeline/tests/gsub.rs
@@ -61,3 +61,37 @@ transform:
Some(TimestampMillisecondValue(1573840000000))
);
}
+
+#[test]
+fn test_ignore_missing() {
+ let empty_string = r#"{}"#;
+
+ let pipeline_yaml = r#"
+processors:
+ - gsub:
+ field: reqTimeSec
+ pattern: "\\."
+ replacement: ""
+ ignore_missing: true
+ - epoch:
+ field: reqTimeSec
+ resolution: millisecond
+ ignore_missing: true
+
+transform:
+ - field: reqTimeSec
+ type: epoch, millisecond
+ index: timestamp
+"#;
+
+ let output = common::parse_and_exec(empty_string, pipeline_yaml);
+
+ let expected_schema = vec![common::make_column_schema(
+ "reqTimeSec".to_string(),
+ ColumnDataType::TimestampMillisecond,
+ SemanticType::Timestamp,
+ )];
+
+ assert_eq!(output.schema, expected_schema);
+ assert_eq!(output.rows[0].values[0].value_data, None);
+}
diff --git a/src/pipeline/tests/join.rs b/src/pipeline/tests/join.rs
index 9ffa35909c76..3625160361a6 100644
--- a/src/pipeline/tests/join.rs
+++ b/src/pipeline/tests/join.rs
@@ -117,3 +117,41 @@ fn test_float() {
Some(StringValue("1.1-1.2-1.3".to_string()))
);
}
+
+#[test]
+fn test_mix_type() {
+ let input_value_str = r#"
+ [
+ {
+ "join_test": [1, true, "a", 1.1]
+ }
+ ]
+"#;
+ let output = common::parse_and_exec(input_value_str, PIPELINE_YAML);
+
+ assert_eq!(output.schema, *EXPECTED_SCHEMA);
+ assert_eq!(
+ output.rows[0].values[0].value_data,
+ Some(StringValue("1-true-a-1.1".to_string()))
+ );
+}
+
+#[test]
+fn test_ignore_missing() {
+ let empty_string = r#"{}"#;
+ let pipeline_yaml = r#"
+processors:
+ - join:
+ field: join_test
+ separator: "-"
+ ignore_missing: true
+
+transform:
+ - field: join_test
+ type: string
+"#;
+ let output = common::parse_and_exec(empty_string, pipeline_yaml);
+
+ assert_eq!(output.schema, *EXPECTED_SCHEMA);
+ assert_eq!(output.rows[0].values[0].value_data, None);
+}
diff --git a/src/pipeline/tests/letter.rs b/src/pipeline/tests/letter.rs
new file mode 100644
index 000000000000..d6d9a2cccbf1
--- /dev/null
+++ b/src/pipeline/tests/letter.rs
@@ -0,0 +1,188 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod common;
+
+use api::v1::ColumnSchema;
+use greptime_proto::v1::value::ValueData;
+use greptime_proto::v1::{ColumnDataType, SemanticType};
+use lazy_static::lazy_static;
+
+lazy_static! {
+ static ref EXPECTED_SCHEMA: Vec<ColumnSchema> = vec![
+ common::make_column_schema(
+ "input_str".to_string(),
+ ColumnDataType::String,
+ SemanticType::Field,
+ ),
+ common::make_column_schema(
+ "greptime_timestamp".to_string(),
+ ColumnDataType::TimestampNanosecond,
+ SemanticType::Timestamp,
+ ),
+ ];
+}
+
+#[test]
+fn test_upper() {
+ let test_input = r#"
+{
+ "input_str": "aaa"
+}"#;
+
+ let pipeline_yaml = r#"
+processors:
+ - letter:
+ fields:
+ - input_str
+ method: upper
+
+transform:
+ - fields:
+ - input_str
+ type: string
+"#;
+
+ let output = common::parse_and_exec(test_input, pipeline_yaml);
+ assert_eq!(output.schema, *EXPECTED_SCHEMA);
+ assert_eq!(
+ output.rows[0].values[0].value_data,
+ Some(ValueData::StringValue("AAA".to_string()))
+ );
+}
+
+#[test]
+fn test_lower() {
+ let test_input = r#"
+{
+ "input_str": "AAA"
+}"#;
+
+ let pipeline_yaml = r#"
+processors:
+ - letter:
+ fields:
+ - input_str
+ method: lower
+
+transform:
+ - fields:
+ - input_str
+ type: string
+"#;
+
+ let output = common::parse_and_exec(test_input, pipeline_yaml);
+ assert_eq!(output.schema, *EXPECTED_SCHEMA);
+ assert_eq!(
+ output.rows[0].values[0].value_data,
+ Some(ValueData::StringValue("aaa".to_string()))
+ );
+}
+
+#[test]
+fn test_capital() {
+ let test_input = r#"
+{
+ "upper": "AAA",
+ "lower": "aaa"
+}"#;
+
+ let pipeline_yaml = r#"
+processors:
+ - letter:
+ fields:
+ - upper
+ - lower
+ method: capital
+
+transform:
+ - fields:
+ - upper
+ - lower
+ type: string
+"#;
+
+ let expected_schema = vec![
+ common::make_column_schema(
+ "upper".to_string(),
+ ColumnDataType::String,
+ SemanticType::Field,
+ ),
+ common::make_column_schema(
+ "lower".to_string(),
+ ColumnDataType::String,
+ SemanticType::Field,
+ ),
+ common::make_column_schema(
+ "greptime_timestamp".to_string(),
+ ColumnDataType::TimestampNanosecond,
+ SemanticType::Timestamp,
+ ),
+ ];
+
+ let output = common::parse_and_exec(test_input, pipeline_yaml);
+ assert_eq!(output.schema, expected_schema);
+ assert_eq!(
+ output.rows[0].values[0].value_data,
+ Some(ValueData::StringValue("AAA".to_string()))
+ );
+ assert_eq!(
+ output.rows[0].values[1].value_data,
+ Some(ValueData::StringValue("Aaa".to_string()))
+ );
+}
+
+#[test]
+fn test_ignore_missing() {
+ let test_input = r#"{}"#;
+
+ let pipeline_yaml = r#"
+processors:
+ - letter:
+ fields:
+ - upper
+ - lower
+ method: capital
+ ignore_missing: true
+
+transform:
+ - fields:
+ - upper
+ - lower
+ type: string
+"#;
+
+ let expected_schema = vec![
+ common::make_column_schema(
+ "upper".to_string(),
+ ColumnDataType::String,
+ SemanticType::Field,
+ ),
+ common::make_column_schema(
+ "lower".to_string(),
+ ColumnDataType::String,
+ SemanticType::Field,
+ ),
+ common::make_column_schema(
+ "greptime_timestamp".to_string(),
+ ColumnDataType::TimestampNanosecond,
+ SemanticType::Timestamp,
+ ),
+ ];
+
+ let output = common::parse_and_exec(test_input, pipeline_yaml);
+ assert_eq!(output.schema, expected_schema);
+ assert_eq!(output.rows[0].values[0].value_data, None);
+ assert_eq!(output.rows[0].values[1].value_data, None);
+}
diff --git a/src/pipeline/tests/regex.rs b/src/pipeline/tests/regex.rs
index 5519c613951f..5be60c987525 100644
--- a/src/pipeline/tests/regex.rs
+++ b/src/pipeline/tests/regex.rs
@@ -14,8 +14,25 @@
mod common;
+use api::v1::ColumnSchema;
use greptime_proto::v1::value::ValueData::StringValue;
use greptime_proto::v1::{ColumnDataType, SemanticType};
+use lazy_static::lazy_static;
+
+lazy_static! {
+ static ref EXPECTED_SCHEMA: Vec<ColumnSchema> = vec![
+ common::make_column_schema(
+ "str_id".to_string(),
+ ColumnDataType::String,
+ SemanticType::Field,
+ ),
+ common::make_column_schema(
+ "greptime_timestamp".to_string(),
+ ColumnDataType::TimestampNanosecond,
+ SemanticType::Timestamp,
+ ),
+ ];
+}
#[test]
fn test_regex_pattern() {
@@ -41,20 +58,7 @@ transform:
let output = common::parse_and_exec(input_value_str, pipeline_yaml);
- let expected_schema = vec![
- common::make_column_schema(
- "str_id".to_string(),
- ColumnDataType::String,
- SemanticType::Field,
- ),
- common::make_column_schema(
- "greptime_timestamp".to_string(),
- ColumnDataType::TimestampNanosecond,
- SemanticType::Timestamp,
- ),
- ];
-
- assert_eq!(output.schema, expected_schema);
+ assert_eq!(output.schema, *EXPECTED_SCHEMA);
assert_eq!(
output.rows[0].values[0].value_data,
@@ -87,23 +91,34 @@ transform:
let output = common::parse_and_exec(input_value_str, pipeline_yaml);
- let expected_schema = vec![
- common::make_column_schema(
- "str_id".to_string(),
- ColumnDataType::String,
- SemanticType::Field,
- ),
- common::make_column_schema(
- "greptime_timestamp".to_string(),
- ColumnDataType::TimestampNanosecond,
- SemanticType::Timestamp,
- ),
- ];
-
- assert_eq!(output.schema, expected_schema);
+ assert_eq!(output.schema, *EXPECTED_SCHEMA);
assert_eq!(
output.rows[0].values[0].value_data,
Some(StringValue("123".to_string()))
);
}
+
+#[test]
+fn test_ignore_missing() {
+ let input_value_str = r#"{}"#;
+
+ let pipeline_yaml = r#"
+processors:
+ - regex:
+ fields:
+ - str
+ pattern: "(?<id>\\d+)"
+ ignore_missing: true
+
+transform:
+ - field: str_id
+ type: string
+"#;
+
+ let output = common::parse_and_exec(input_value_str, pipeline_yaml);
+
+ assert_eq!(output.schema, *EXPECTED_SCHEMA);
+
+ assert_eq!(output.rows[0].values[0].value_data, None);
+}
diff --git a/src/pipeline/tests/urlencoding.rs b/src/pipeline/tests/urlencoding.rs
new file mode 100644
index 000000000000..dd0c4ffe9f2f
--- /dev/null
+++ b/src/pipeline/tests/urlencoding.rs
@@ -0,0 +1,112 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod common;
+
+use greptime_proto::v1::value::ValueData;
+use greptime_proto::v1::{ColumnDataType, SemanticType};
+
+#[test]
+fn test() {
+ let test_input = r#"
+{
+ "encoding": "2024-06-27T06:13:36.991Z",
+ "decoding": "2024-06-27T06%3A13%3A36.991Z"
+}"#;
+
+ let pipeline_yaml = r#"
+processors:
+ - urlencoding:
+ field: encoding
+ method: encode
+
+ - urlencoding:
+ field: decoding
+ method: decode
+
+transform:
+ - fields:
+ - encoding
+ - decoding
+ type: string
+"#;
+
+ let expected_schema = vec![
+ common::make_column_schema(
+ "encoding".to_string(),
+ ColumnDataType::String,
+ SemanticType::Field,
+ ),
+ common::make_column_schema(
+ "decoding".to_string(),
+ ColumnDataType::String,
+ SemanticType::Field,
+ ),
+ common::make_column_schema(
+ "greptime_timestamp".to_string(),
+ ColumnDataType::TimestampNanosecond,
+ SemanticType::Timestamp,
+ ),
+ ];
+
+ let output = common::parse_and_exec(test_input, pipeline_yaml);
+ assert_eq!(output.schema, expected_schema);
+ assert_eq!(
+ output.rows[0].values[0].value_data,
+ Some(ValueData::StringValue(
+ "2024-06-27T06%3A13%3A36.991Z".to_string()
+ ))
+ );
+ assert_eq!(
+ output.rows[0].values[1].value_data,
+ Some(ValueData::StringValue(
+ "2024-06-27T06:13:36.991Z".to_string()
+ ))
+ );
+}
+
+#[test]
+fn test_ignore_missing() {
+ let test_input = r#"{}"#;
+
+ let pipeline_yaml = r#"
+processors:
+ - urlencoding:
+ field: encoding
+ method: encode
+ ignore_missing: true
+
+transform:
+ - fields:
+ - encoding
+ type: string
+"#;
+
+ let expected_schema = vec![
+ common::make_column_schema(
+ "encoding".to_string(),
+ ColumnDataType::String,
+ SemanticType::Field,
+ ),
+ common::make_column_schema(
+ "greptime_timestamp".to_string(),
+ ColumnDataType::TimestampNanosecond,
+ SemanticType::Timestamp,
+ ),
+ ];
+
+ let output = common::parse_and_exec(test_input, pipeline_yaml);
+ assert_eq!(output.schema, expected_schema);
+ assert_eq!(output.rows[0].values[0].value_data, None);
+}
|
test
|
more on processors (#4493)
|
3bf9981aab99fad7cffd1c14fe6b6b2bf90c2543
|
2024-11-13 08:33:51
|
Weny Xu
|
refactor: support distinct JSON format and improve type conversions (#4979)
| false
|
diff --git a/src/api/src/helper.rs b/src/api/src/helper.rs
index 64baae1187c8..2cd338e1774a 100644
--- a/src/api/src/helper.rs
+++ b/src/api/src/helper.rs
@@ -268,7 +268,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
ConcreteDataType::UInt64(_) => ColumnDataType::Uint64,
ConcreteDataType::Float32(_) => ColumnDataType::Float32,
ConcreteDataType::Float64(_) => ColumnDataType::Float64,
- ConcreteDataType::Binary(_) | ConcreteDataType::Json(_) => ColumnDataType::Binary,
+ ConcreteDataType::Binary(_) => ColumnDataType::Binary,
ConcreteDataType::String(_) => ColumnDataType::String,
ConcreteDataType::Date(_) => ColumnDataType::Date,
ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
@@ -290,6 +290,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano,
},
ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
+ ConcreteDataType::Json(_) => ColumnDataType::Json,
ConcreteDataType::Vector(_) => ColumnDataType::Vector,
ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
@@ -309,16 +310,9 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
})),
})
}
- ColumnDataType::Binary => {
- if datatype == ConcreteDataType::json_datatype() {
- // Json is the same as binary in proto. The extension marks the binary in proto is actually a json.
- Some(ColumnDataTypeExtension {
- type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
- })
- } else {
- None
- }
- }
+ ColumnDataType::Json => datatype.as_json().map(|_| ColumnDataTypeExtension {
+ type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
+ }),
ColumnDataType::Vector => {
datatype
.as_vector()
diff --git a/src/datatypes/src/data_type.rs b/src/datatypes/src/data_type.rs
index a967ad5fc886..8f81a0c86f76 100644
--- a/src/datatypes/src/data_type.rs
+++ b/src/datatypes/src/data_type.rs
@@ -343,6 +343,13 @@ impl ConcreteDataType {
}
}
+ pub fn as_json(&self) -> Option<JsonType> {
+ match self {
+ ConcreteDataType::Json(j) => Some(*j),
+ _ => None,
+ }
+ }
+
pub fn as_vector(&self) -> Option<VectorType> {
match self {
ConcreteDataType::Vector(v) => Some(*v),
diff --git a/src/datatypes/src/types.rs b/src/datatypes/src/types.rs
index 4e991c1868fe..af7016ff82ce 100644
--- a/src/datatypes/src/types.rs
+++ b/src/datatypes/src/types.rs
@@ -44,7 +44,9 @@ pub use duration_type::{
pub use interval_type::{
IntervalDayTimeType, IntervalMonthDayNanoType, IntervalType, IntervalYearMonthType,
};
-pub use json_type::{JsonType, JSON_TYPE_NAME};
+pub use json_type::{
+ json_type_value_to_string, parse_string_to_json_type_value, JsonType, JSON_TYPE_NAME,
+};
pub use list_type::ListType;
pub use null_type::NullType;
pub use primitive_type::{
diff --git a/src/datatypes/src/types/json_type.rs b/src/datatypes/src/types/json_type.rs
index 416b59b5c0ef..0c004e2d446e 100644
--- a/src/datatypes/src/types/json_type.rs
+++ b/src/datatypes/src/types/json_type.rs
@@ -12,13 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::sync::Arc;
-
use arrow::datatypes::DataType as ArrowDataType;
use common_base::bytes::Bytes;
use serde::{Deserialize, Serialize};
-use crate::data_type::{DataType, DataTypeRef};
+use crate::data_type::DataType;
+use crate::error::{InvalidJsonSnafu, Result};
use crate::scalars::ScalarVectorBuilder;
use crate::type_id::LogicalTypeId;
use crate::value::Value;
@@ -26,14 +25,29 @@ use crate::vectors::{BinaryVectorBuilder, MutableVector};
pub const JSON_TYPE_NAME: &str = "Json";
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
+pub enum JsonFormat {
+ Jsonb,
+}
+
+impl Default for JsonFormat {
+ fn default() -> Self {
+ Self::Jsonb
+ }
+}
+
/// JsonType is a data type for JSON data. It is stored as binary data of jsonb format.
/// It utilizes current binary value and vector implementation.
-#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
-pub struct JsonType;
+#[derive(
+ Debug, Default, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize,
+)]
+pub struct JsonType {
+ pub format: JsonFormat,
+}
impl JsonType {
- pub fn arc() -> DataTypeRef {
- Arc::new(Self)
+ pub fn new(format: JsonFormat) -> Self {
+ Self { format }
}
}
@@ -65,3 +79,19 @@ impl DataType for JsonType {
}
}
}
+
+/// Converts a json type value to string
+pub fn json_type_value_to_string(val: &[u8], format: &JsonFormat) -> Result<String> {
+ match format {
+ JsonFormat::Jsonb => Ok(jsonb::to_string(val)),
+ }
+}
+
+/// Parses a string to a json type value
+pub fn parse_string_to_json_type_value(s: &str, format: &JsonFormat) -> Result<Vec<u8>> {
+ match format {
+ JsonFormat::Jsonb => jsonb::parse_value(s.as_bytes())
+ .map_err(|_| InvalidJsonSnafu { value: s }.build())
+ .map(|json| json.to_vec()),
+ }
+}
diff --git a/src/mito2/src/request.rs b/src/mito2/src/request.rs
index 299a3455787d..c6dfcbe6c716 100644
--- a/src/mito2/src/request.rs
+++ b/src/mito2/src/request.rs
@@ -398,6 +398,7 @@ fn proto_value_type_match(column_type: ColumnDataType, value_type: ColumnDataTyp
match (column_type, value_type) {
(ct, vt) if ct == vt => true,
(ColumnDataType::Vector, ColumnDataType::Binary) => true,
+ (ColumnDataType::Json, ColumnDataType::Binary) => true,
_ => false,
}
}
diff --git a/src/operator/src/req_convert/common.rs b/src/operator/src/req_convert/common.rs
index 2822f1dbec6f..518f2d55fa5c 100644
--- a/src/operator/src/req_convert/common.rs
+++ b/src/operator/src/req_convert/common.rs
@@ -90,7 +90,7 @@ fn prepare_rows(rows: &mut Option<Rows>) -> Result<()> {
column.datatype_extension = Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
});
- column.datatype = ColumnDataType::Binary.into();
+ column.datatype = ColumnDataType::Json.into();
}
for idx in &indexes {
diff --git a/src/servers/src/mysql/writer.rs b/src/servers/src/mysql/writer.rs
index 6e46a2b65231..90faa171debc 100644
--- a/src/servers/src/mysql/writer.rs
+++ b/src/servers/src/mysql/writer.rs
@@ -21,7 +21,7 @@ use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_telemetry::{debug, error};
use datatypes::prelude::{ConcreteDataType, Value};
use datatypes::schema::SchemaRef;
-use datatypes::types::vector_type_value_to_string;
+use datatypes::types::{json_type_value_to_string, vector_type_value_to_string};
use futures::StreamExt;
use opensrv_mysql::{
Column, ColumnFlags, ColumnType, ErrorKind, OkResponse, QueryResultWriter, RowWriter,
@@ -212,8 +212,10 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
Value::Float64(v) => row_writer.write_col(v.0)?,
Value::String(v) => row_writer.write_col(v.as_utf8())?,
Value::Binary(v) => match column.data_type {
- ConcreteDataType::Json(_) => {
- row_writer.write_col(jsonb::to_string(&v))?;
+ ConcreteDataType::Json(j) => {
+ let s = json_type_value_to_string(&v, &j.format)
+ .context(ConvertSqlValueSnafu)?;
+ row_writer.write_col(s)?;
}
ConcreteDataType::Vector(d) => {
let s = vector_type_value_to_string(&v, d.dim)
diff --git a/src/servers/src/postgres/types.rs b/src/servers/src/postgres/types.rs
index a26d803b805c..b5a331976644 100644
--- a/src/servers/src/postgres/types.rs
+++ b/src/servers/src/postgres/types.rs
@@ -27,7 +27,9 @@ use datafusion_expr::LogicalPlan;
use datatypes::arrow::datatypes::DataType as ArrowDataType;
use datatypes::prelude::{ConcreteDataType, Value};
use datatypes::schema::Schema;
-use datatypes::types::{vector_type_value_to_string, IntervalType, TimestampType};
+use datatypes::types::{
+ json_type_value_to_string, vector_type_value_to_string, IntervalType, TimestampType,
+};
use datatypes::value::ListValue;
use pgwire::api::portal::{Format, Portal};
use pgwire::api::results::{DataRowEncoder, FieldInfo};
@@ -350,13 +352,17 @@ fn encode_array(
.collect::<PgWireResult<Vec<Option<String>>>>()?;
builder.encode_field(&array)
}
- &ConcreteDataType::Json(_) => {
+ &ConcreteDataType::Json(j) => {
let array = value_list
.items()
.iter()
.map(|v| match v {
Value::Null => Ok(None),
- Value::Binary(v) => Ok(Some(jsonb::to_string(v))),
+ Value::Binary(v) => {
+ let s = json_type_value_to_string(v, &j.format)
+ .map_err(|e| PgWireError::ApiError(Box::new(e)))?;
+ Ok(Some(s))
+ }
_ => Err(PgWireError::ApiError(Box::new(Error::Internal {
err_msg: format!("Invalid list item type, find {v:?}, expected json",),
}))),
@@ -412,7 +418,11 @@ pub(super) fn encode_value(
Value::Float64(v) => builder.encode_field(&v.0),
Value::String(v) => builder.encode_field(&v.as_utf8()),
Value::Binary(v) => match datatype {
- ConcreteDataType::Json(_) => builder.encode_field(&jsonb::to_string(v)),
+ ConcreteDataType::Json(j) => {
+ let s = json_type_value_to_string(v, &j.format)
+ .map_err(|e| PgWireError::ApiError(Box::new(e)))?;
+ builder.encode_field(&s)
+ }
ConcreteDataType::Vector(d) => {
let s = vector_type_value_to_string(v, d.dim)
.map_err(|e| PgWireError::ApiError(Box::new(e)))?;
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index fd2cb3dee268..bb0844a46928 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -42,7 +42,9 @@ use common_time::Timestamp;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::constraint::{CURRENT_TIMESTAMP, CURRENT_TIMESTAMP_FN};
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, COMMENT_KEY};
-use datatypes::types::{cast, parse_string_to_vector_type_value, TimestampType};
+use datatypes::types::{
+ cast, parse_string_to_json_type_value, parse_string_to_vector_type_value, TimestampType,
+};
use datatypes::value::{OrderedF32, OrderedF64, Value};
use snafu::{ensure, OptionExt, ResultExt};
use sqlparser::ast::{ExactNumberInfo, Ident, ObjectName, UnaryOperator};
@@ -126,15 +128,9 @@ fn parse_string_to_value(
}
}
ConcreteDataType::Binary(_) => Ok(Value::Binary(s.as_bytes().into())),
- ConcreteDataType::Json(_) => {
- if let Ok(json) = jsonb::parse_value(s.as_bytes()) {
- Ok(Value::Binary(json.to_vec().into()))
- } else {
- ParseSqlValueSnafu {
- msg: format!("Failed to parse {s} to Json value"),
- }
- .fail()
- }
+ ConcreteDataType::Json(j) => {
+ let v = parse_string_to_json_type_value(&s, &j.format).context(DatatypeSnafu)?;
+ Ok(Value::Binary(v.into()))
}
ConcreteDataType::Vector(d) => {
let v = parse_string_to_vector_type_value(&s, d.dim).context(DatatypeSnafu)?;
|
refactor
|
support distinct JSON format and improve type conversions (#4979)
|
e5663a075f09b9fc47c10b7987738c5743e2be5e
|
2023-08-04 08:23:02
|
Yingwen
|
feat(mito): preparation to implementing write (#2085)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 0074c227fe9f..7ff3c5be1f01 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -211,7 +211,7 @@ dependencies = [
"common-error",
"common-time",
"datatypes",
- "greptime-proto",
+ "greptime-proto 0.1.0 (git+https://github.com/GreptimeTeam/greptime-proto.git?rev=eeae2d0dfa8ee320a7b9e987b4631a6c1c732ebd)",
"prost",
"snafu",
"tonic 0.9.2",
@@ -4111,6 +4111,18 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
+[[package]]
+name = "greptime-proto"
+version = "0.1.0"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=ec4b84931378004db60d168e2604bc3fb9735e9c#ec4b84931378004db60d168e2604bc3fb9735e9c"
+dependencies = [
+ "prost",
+ "serde",
+ "serde_json",
+ "tonic 0.9.2",
+ "tonic-build",
+]
+
[[package]]
name = "greptime-proto"
version = "0.1.0"
@@ -5498,6 +5510,7 @@ dependencies = [
"datafusion-common",
"datatypes",
"futures",
+ "greptime-proto 0.1.0 (git+https://github.com/GreptimeTeam/greptime-proto.git?rev=ec4b84931378004db60d168e2604bc3fb9735e9c)",
"lazy_static",
"log-store",
"metrics",
@@ -6982,7 +6995,7 @@ dependencies = [
"datafusion",
"datatypes",
"futures",
- "greptime-proto",
+ "greptime-proto 0.1.0 (git+https://github.com/GreptimeTeam/greptime-proto.git?rev=eeae2d0dfa8ee320a7b9e987b4631a6c1c732ebd)",
"promql-parser",
"prost",
"query",
@@ -7252,7 +7265,7 @@ dependencies = [
"format_num",
"futures",
"futures-util",
- "greptime-proto",
+ "greptime-proto 0.1.0 (git+https://github.com/GreptimeTeam/greptime-proto.git?rev=eeae2d0dfa8ee320a7b9e987b4631a6c1c732ebd)",
"humantime",
"metrics",
"num",
diff --git a/src/mito2/Cargo.toml b/src/mito2/Cargo.toml
index bdc7ac2da9b7..5e88a9dccb35 100644
--- a/src/mito2/Cargo.toml
+++ b/src/mito2/Cargo.toml
@@ -32,6 +32,8 @@ datafusion.workspace = true
datafusion-common.workspace = true
datatypes = { path = "../datatypes" }
futures.workspace = true
+# TODO(yingwen): Update and use api crate once https://github.com/GreptimeTeam/greptime-proto/pull/75 is merged.
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ec4b84931378004db60d168e2604bc3fb9735e9c" }
lazy_static = "1.4"
log-store = { path = "../log-store" }
metrics.workspace = true
diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs
index c1ef1cb1aa69..e4a74527cb7c 100644
--- a/src/mito2/src/engine.rs
+++ b/src/mito2/src/engine.rs
@@ -26,8 +26,9 @@ use store_api::storage::RegionId;
use crate::config::MitoConfig;
use crate::error::{RecvSnafu, Result};
-pub use crate::request::CreateRequest;
-use crate::request::{CloseRequest, OpenRequest, RegionRequest, RequestBody};
+use crate::request::{
+ CloseRequest, CreateRequest, OpenRequest, RegionRequest, RequestBody, WriteRequest,
+};
use crate::worker::WorkerGroup;
/// Region engine implementation for timeseries data.
@@ -84,6 +85,19 @@ impl MitoEngine {
pub fn is_region_exists(&self, region_id: RegionId) -> bool {
self.inner.workers.is_region_exists(region_id)
}
+
+ /// Write to a region.
+ pub async fn write_region(&self, write_request: WriteRequest) -> Result<()> {
+ write_request.validate()?;
+
+ // TODO(yingwen): Fill default values.
+ // We need to fill default values before writing it to WAL so we can get
+ // the same default value after reopening the region.
+
+ self.inner
+ .handle_request_body(RequestBody::Write(write_request))
+ .await
+ }
}
/// Inner struct of [MitoEngine].
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index 508796945649..4b92522118d3 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -174,6 +174,37 @@ pub enum Error {
reason: String,
location: Location,
},
+
+ #[snafu(display("Invalid request to region {}, reason: {}", region_id, reason))]
+ InvalidRequest {
+ region_id: RegionId,
+ reason: String,
+ location: Location,
+ },
+
+ /// An error type to indicate that schema is changed and we need
+ /// to fill default values again.
+ #[snafu(display(
+ "Need to fill default value to column {} of region {}",
+ column,
+ region_id
+ ))]
+ FillDefault {
+ region_id: RegionId,
+ column: String,
+ // The error is for retry purpose so we don't need a location.
+ },
+
+ #[snafu(display(
+ "Failed to create default value for column {} of region {}",
+ column,
+ region_id
+ ))]
+ CreateDefault {
+ region_id: RegionId,
+ column: String,
+ source: datatypes::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -193,10 +224,13 @@ impl ErrorExt for Error {
| RegionExists { .. }
| NewRecordBatch { .. }
| RegionNotFound { .. }
- | RegionCorrupted { .. } => StatusCode::Unexpected,
- InvalidScanIndex { .. } | InvalidMeta { .. } | InvalidSchema { .. } => {
- StatusCode::InvalidArguments
- }
+ | RegionCorrupted { .. }
+ | CreateDefault { .. } => StatusCode::Unexpected,
+ InvalidScanIndex { .. }
+ | InvalidMeta { .. }
+ | InvalidSchema { .. }
+ | InvalidRequest { .. }
+ | FillDefault { .. } => StatusCode::InvalidArguments,
RegionMetadataNotFound { .. } | Join { .. } | WorkerStopped { .. } | Recv { .. } => {
StatusCode::Internal
}
diff --git a/src/mito2/src/lib.rs b/src/mito2/src/lib.rs
index b3953a2a1cd2..1aeacc9270d1 100644
--- a/src/mito2/src/lib.rs
+++ b/src/mito2/src/lib.rs
@@ -31,6 +31,7 @@ pub mod manifest;
pub mod memtable;
#[allow(dead_code)]
pub mod metadata;
+pub(crate) mod proto_util;
pub mod read;
#[allow(dead_code)]
mod region;
diff --git a/src/mito2/src/metadata.rs b/src/mito2/src/metadata.rs
index f201a88002be..7f1588d1559c 100644
--- a/src/mito2/src/metadata.rs
+++ b/src/mito2/src/metadata.rs
@@ -310,14 +310,14 @@ impl ColumnMetadata {
}
/// The semantic type of one column
-#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum SemanticType {
/// Tag column, also is a part of primary key.
- Tag,
+ Tag = 0,
/// A column that isn't a time index or part of primary key.
- Field,
+ Field = 1,
/// Time index column.
- Timestamp,
+ Timestamp = 2,
}
/// Fields skipped in serialization.
diff --git a/src/mito2/src/proto_util.rs b/src/mito2/src/proto_util.rs
new file mode 100644
index 000000000000..6884dff3604d
--- /dev/null
+++ b/src/mito2/src/proto_util.rs
@@ -0,0 +1,188 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Utilities to process protobuf messages.
+
+use common_time::timestamp::TimeUnit;
+use datatypes::prelude::ConcreteDataType;
+use datatypes::types::{TimeType, TimestampType};
+use datatypes::value::Value;
+use greptime_proto::v1::{self, ColumnDataType};
+use store_api::storage::OpType;
+
+use crate::metadata::SemanticType;
+
+/// Returns true if the pb semantic type is valid.
+pub(crate) fn is_semantic_type_eq(type_value: i32, semantic_type: SemanticType) -> bool {
+ type_value == semantic_type as i32
+}
+
+/// Returns true if the pb type value is valid.
+pub(crate) fn is_column_type_value_eq(type_value: i32, expect_type: &ConcreteDataType) -> bool {
+ let Some(column_type) = ColumnDataType::from_i32(type_value) else {
+ return false;
+ };
+
+ is_column_type_eq(column_type, expect_type)
+}
+
+/// Convert value into proto's value.
+pub(crate) fn to_proto_value(value: Value) -> Option<v1::Value> {
+ let proto_value = match value {
+ Value::Null => v1::Value { value: None },
+ Value::Boolean(v) => v1::Value {
+ value: Some(v1::value::Value::BoolValue(v)),
+ },
+ Value::UInt8(v) => v1::Value {
+ value: Some(v1::value::Value::U8Value(v.into())),
+ },
+ Value::UInt16(v) => v1::Value {
+ value: Some(v1::value::Value::U16Value(v.into())),
+ },
+ Value::UInt32(v) => v1::Value {
+ value: Some(v1::value::Value::U32Value(v)),
+ },
+ Value::UInt64(v) => v1::Value {
+ value: Some(v1::value::Value::U64Value(v)),
+ },
+ Value::Int8(v) => v1::Value {
+ value: Some(v1::value::Value::I8Value(v.into())),
+ },
+ Value::Int16(v) => v1::Value {
+ value: Some(v1::value::Value::I16Value(v.into())),
+ },
+ Value::Int32(v) => v1::Value {
+ value: Some(v1::value::Value::I32Value(v)),
+ },
+ Value::Int64(v) => v1::Value {
+ value: Some(v1::value::Value::I64Value(v)),
+ },
+ Value::Float32(v) => v1::Value {
+ value: Some(v1::value::Value::F32Value(*v)),
+ },
+ Value::Float64(v) => v1::Value {
+ value: Some(v1::value::Value::F64Value(*v)),
+ },
+ Value::String(v) => v1::Value {
+ value: Some(v1::value::Value::StringValue(v.as_utf8().to_string())),
+ },
+ Value::Binary(v) => v1::Value {
+ value: Some(v1::value::Value::BinaryValue(v.to_vec())),
+ },
+ Value::Date(v) => v1::Value {
+ value: Some(v1::value::Value::DateValue(v.val())),
+ },
+ Value::DateTime(v) => v1::Value {
+ value: Some(v1::value::Value::DatetimeValue(v.val())),
+ },
+ Value::Timestamp(v) => match v.unit() {
+ TimeUnit::Second => v1::Value {
+ value: Some(v1::value::Value::TsSecondValue(v.value())),
+ },
+ TimeUnit::Millisecond => v1::Value {
+ value: Some(v1::value::Value::TsMillisecondValue(v.value())),
+ },
+ TimeUnit::Microsecond => v1::Value {
+ value: Some(v1::value::Value::TsMicrosecondValue(v.value())),
+ },
+ TimeUnit::Nanosecond => v1::Value {
+ value: Some(v1::value::Value::TsNanosecondValue(v.value())),
+ },
+ },
+ Value::Time(v) => match v.unit() {
+ TimeUnit::Second => v1::Value {
+ value: Some(v1::value::Value::TimeSecondValue(v.value())),
+ },
+ TimeUnit::Millisecond => v1::Value {
+ value: Some(v1::value::Value::TimeMillisecondValue(v.value())),
+ },
+ TimeUnit::Microsecond => v1::Value {
+ value: Some(v1::value::Value::TimeMicrosecondValue(v.value())),
+ },
+ TimeUnit::Nanosecond => v1::Value {
+ value: Some(v1::value::Value::TimeNanosecondValue(v.value())),
+ },
+ },
+ Value::Interval(_) | Value::List(_) => return None,
+ };
+
+ Some(proto_value)
+}
+
+/// Convert [ConcreteDataType] to [ColumnDataType].
+pub(crate) fn to_column_data_type(data_type: &ConcreteDataType) -> Option<ColumnDataType> {
+ let column_data_type = match data_type {
+ ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
+ ConcreteDataType::Int8(_) => ColumnDataType::Int8,
+ ConcreteDataType::Int16(_) => ColumnDataType::Int16,
+ ConcreteDataType::Int32(_) => ColumnDataType::Int32,
+ ConcreteDataType::Int64(_) => ColumnDataType::Int64,
+ ConcreteDataType::UInt8(_) => ColumnDataType::Uint8,
+ ConcreteDataType::UInt16(_) => ColumnDataType::Uint16,
+ ConcreteDataType::UInt32(_) => ColumnDataType::Uint32,
+ ConcreteDataType::UInt64(_) => ColumnDataType::Uint64,
+ ConcreteDataType::Float32(_) => ColumnDataType::Float32,
+ ConcreteDataType::Float64(_) => ColumnDataType::Float64,
+ ConcreteDataType::Binary(_) => ColumnDataType::Binary,
+ ConcreteDataType::String(_) => ColumnDataType::String,
+ ConcreteDataType::Date(_) => ColumnDataType::Date,
+ ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
+ ConcreteDataType::Timestamp(TimestampType::Second(_)) => ColumnDataType::TimestampSecond,
+ ConcreteDataType::Timestamp(TimestampType::Millisecond(_)) => {
+ ColumnDataType::TimestampMillisecond
+ }
+ ConcreteDataType::Timestamp(TimestampType::Microsecond(_)) => {
+ ColumnDataType::TimestampMicrosecond
+ }
+ ConcreteDataType::Timestamp(TimestampType::Nanosecond(_)) => {
+ ColumnDataType::TimestampNanosecond
+ }
+ ConcreteDataType::Time(TimeType::Second(_)) => ColumnDataType::TimeSecond,
+ ConcreteDataType::Time(TimeType::Millisecond(_)) => ColumnDataType::TimeMillisecond,
+ ConcreteDataType::Time(TimeType::Microsecond(_)) => ColumnDataType::TimeMicrosecond,
+ ConcreteDataType::Time(TimeType::Nanosecond(_)) => ColumnDataType::TimeNanosecond,
+ ConcreteDataType::Null(_)
+ | ConcreteDataType::Interval(_)
+ | ConcreteDataType::List(_)
+ | ConcreteDataType::Dictionary(_) => return None,
+ };
+
+ Some(column_data_type)
+}
+
+/// Convert semantic type to proto's semantic type
+pub(crate) fn to_proto_semantic_type(semantic_type: SemanticType) -> v1::SemanticType {
+ match semantic_type {
+ SemanticType::Tag => v1::SemanticType::Tag,
+ SemanticType::Field => v1::SemanticType::Field,
+ SemanticType::Timestamp => v1::SemanticType::Timestamp,
+ }
+}
+
+/// Convert op type to proto's op type.
+pub(crate) fn to_proto_op_type(op_type: OpType) -> v1::mito::OpType {
+ match op_type {
+ OpType::Delete => v1::mito::OpType::Delete,
+ OpType::Put => v1::mito::OpType::Put,
+ }
+}
+
+/// Returns true if the column type is equal to expected type.
+fn is_column_type_eq(column_type: ColumnDataType, expect_type: &ConcreteDataType) -> bool {
+ if let Some(expect) = to_column_data_type(expect_type) {
+ column_type == expect
+ } else {
+ false
+ }
+}
diff --git a/src/mito2/src/region.rs b/src/mito2/src/region.rs
index c0266ff0df1d..2be5ddf2478c 100644
--- a/src/mito2/src/region.rs
+++ b/src/mito2/src/region.rs
@@ -15,7 +15,7 @@
//! Mito region.
pub(crate) mod opener;
-mod version;
+pub(crate) mod version;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
@@ -25,7 +25,7 @@ use store_api::storage::RegionId;
use crate::error::Result;
use crate::manifest::manager::RegionManifestManager;
-use crate::region::version::VersionControlRef;
+use crate::region::version::{VersionControlRef, VersionRef};
/// Type to store region version.
pub type VersionNumber = u32;
@@ -56,6 +56,11 @@ impl MitoRegion {
Ok(())
}
+
+ /// Returns current version of the region.
+ pub(crate) fn version(&self) -> VersionRef {
+ self.version_control.current()
+ }
}
/// Regions indexed by ids.
diff --git a/src/mito2/src/region/version.rs b/src/mito2/src/region/version.rs
index d9c5e9ecece0..54fe29df3ca8 100644
--- a/src/mito2/src/region/version.rs
+++ b/src/mito2/src/region/version.rs
@@ -26,7 +26,6 @@
use std::sync::Arc;
use arc_swap::ArcSwap;
-use store_api::manifest::ManifestVersion;
use store_api::storage::SequenceNumber;
use crate::memtable::version::{MemtableVersion, MemtableVersionRef};
@@ -48,6 +47,11 @@ impl VersionControl {
version: ArcSwap::new(Arc::new(version)),
}
}
+
+ /// Returns current [Version].
+ pub(crate) fn current(&self) -> VersionRef {
+ self.version.load_full()
+ }
}
pub(crate) type VersionControlRef = Arc<VersionControl>;
@@ -59,21 +63,20 @@ pub(crate) struct Version {
///
/// Altering metadata isn't frequent, storing metadata in Arc to allow sharing
/// metadata and reuse metadata when creating a new `Version`.
- metadata: RegionMetadataRef,
+ pub(crate) metadata: RegionMetadataRef,
/// Mutable and immutable memtables.
///
/// Wrapped in Arc to make clone of `Version` much cheaper.
- memtables: MemtableVersionRef,
+ pub(crate) memtables: MemtableVersionRef,
/// SSTs of the region.
- ssts: SstVersionRef,
+ pub(crate) ssts: SstVersionRef,
/// Inclusive max sequence of flushed data.
- flushed_sequence: SequenceNumber,
- // TODO(yingwen): Remove this.
- /// Current version of region manifest.
- manifest_version: ManifestVersion,
+ pub(crate) flushed_sequence: SequenceNumber,
// TODO(yingwen): RegionOptions.
}
+pub(crate) type VersionRef = Arc<Version>;
+
/// Version builder.
pub(crate) struct VersionBuilder {
metadata: RegionMetadataRef,
@@ -94,7 +97,6 @@ impl VersionBuilder {
memtables: Arc::new(MemtableVersion::new(self.mutable)),
ssts: Arc::new(SstVersion::new()),
flushed_sequence: 0,
- manifest_version: 0,
}
}
}
diff --git a/src/mito2/src/request.rs b/src/mito2/src/request.rs
index a18105a7548e..10348ac70df4 100644
--- a/src/mito2/src/request.rs
+++ b/src/mito2/src/request.rs
@@ -14,15 +14,22 @@
//! Worker requests.
+use std::collections::HashMap;
use std::time::Duration;
use common_base::readable_size::ReadableSize;
-use store_api::storage::{ColumnId, CompactionStrategy, RegionId};
+use greptime_proto::v1::{ColumnDataType, ColumnSchema, Rows};
+use snafu::{ensure, OptionExt, ResultExt};
+use store_api::storage::{ColumnId, CompactionStrategy, OpType, RegionId};
use tokio::sync::oneshot::{self, Receiver, Sender};
use crate::config::DEFAULT_WRITE_BUFFER_SIZE;
-use crate::error::Result;
-use crate::metadata::ColumnMetadata;
+use crate::error::{CreateDefaultSnafu, FillDefaultSnafu, InvalidRequestSnafu, Result};
+use crate::metadata::{ColumnMetadata, RegionMetadata};
+use crate::proto_util::{
+ is_column_type_value_eq, is_semantic_type_eq, to_column_data_type, to_proto_semantic_type,
+ to_proto_value,
+};
/// Options that affect the entire region.
///
@@ -84,9 +91,193 @@ pub struct CloseRequest {
/// Request to write a region.
#[derive(Debug)]
-pub(crate) struct WriteRequest {
+pub struct WriteRequest {
/// Region to write.
pub region_id: RegionId,
+ /// Type of the write request.
+ pub op_type: OpType,
+ /// Rows to write.
+ pub rows: Rows,
+ /// Map column name to column index in `rows`.
+ name_to_index: HashMap<String, usize>,
+}
+
+impl WriteRequest {
+ /// Returns a new request.
+ pub fn new(region_id: RegionId, op_type: OpType, rows: Rows) -> WriteRequest {
+ let name_to_index = rows
+ .schema
+ .iter()
+ .enumerate()
+ .map(|(index, column)| (column.column_name.clone(), index))
+ .collect();
+ WriteRequest {
+ region_id,
+ op_type,
+ rows,
+ name_to_index,
+ }
+ }
+
+ /// Validate the request.
+ pub(crate) fn validate(&self) -> Result<()> {
+ // - checks whether the request is too large.
+ // - checks whether each row in rows has the same schema.
+ // - checks whether each column match the schema in Rows.
+ // - checks rows don't have duplicate columns.
+ unimplemented!()
+ }
+
+ /// Checks schema of rows.
+ ///
+ /// If column with default value is missing, it returns a special [FillDefault](crate::error::Error::FillDefault)
+ /// error.
+ pub(crate) fn check_schema(&self, metadata: &RegionMetadata) -> Result<()> {
+ let region_id = self.region_id;
+ // Index all columns in rows.
+ let mut rows_columns: HashMap<_, _> = self
+ .rows
+ .schema
+ .iter()
+ .map(|column| (&column.column_name, column))
+ .collect();
+
+ // Checks all columns in this region.
+ for column in &metadata.column_metadatas {
+ if let Some(input_col) = rows_columns.remove(&column.column_schema.name) {
+ // Check data type.
+ ensure!(
+ is_column_type_value_eq(input_col.datatype, &column.column_schema.data_type),
+ InvalidRequestSnafu {
+ region_id,
+ reason: format!(
+ "column {} expect type {:?}, given: {:?}({})",
+ column.column_schema.name,
+ column.column_schema.data_type,
+ ColumnDataType::from_i32(input_col.datatype),
+ input_col.datatype,
+ )
+ }
+ );
+
+ // Check semantic type.
+ ensure!(
+ is_semantic_type_eq(input_col.semantic_type, column.semantic_type),
+ InvalidRequestSnafu {
+ region_id,
+ reason: format!(
+ "column {} has semantic type {:?}, given: {:?}({})",
+ column.column_schema.name,
+ column.semantic_type,
+ greptime_proto::v1::SemanticType::from_i32(input_col.semantic_type),
+ input_col.semantic_type
+ ),
+ }
+ );
+ } else {
+ // For columns not in rows, checks whether they have default value.
+ ensure!(
+ column.column_schema.is_nullable()
+ || column.column_schema.default_constraint().is_some(),
+ InvalidRequestSnafu {
+ region_id,
+ reason: format!("missing column {}", column.column_schema.name),
+ }
+ );
+
+ return FillDefaultSnafu {
+ region_id,
+ column: &column.column_schema.name,
+ }
+ .fail();
+ }
+ }
+
+ // Checks all columns in rows exist in the region.
+ if !rows_columns.is_empty() {
+ let names: Vec<_> = rows_columns.into_keys().collect();
+ return InvalidRequestSnafu {
+ region_id,
+ reason: format!("unknown columns: {:?}", names),
+ }
+ .fail();
+ }
+
+ Ok(())
+ }
+
+ /// Try to fill missing columns.
+ ///
+ /// Currently, our protobuf format might be inefficient when we need to fill lots of null
+ /// values.
+ pub(crate) fn fill_missing_columns(&mut self, metadata: &RegionMetadata) -> Result<()> {
+ for column in &metadata.column_metadatas {
+ if !self.name_to_index.contains_key(&column.column_schema.name) {
+ self.fill_column(metadata.region_id, column)?;
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Fill default value for specific `column`.
+ fn fill_column(&mut self, region_id: RegionId, column: &ColumnMetadata) -> Result<()> {
+ // Need to add a default value for this column.
+ let default_value = column
+ .column_schema
+ .create_default()
+ .context(CreateDefaultSnafu {
+ region_id,
+ column: &column.column_schema.name,
+ })?
+ // This column doesn't have default value.
+ .with_context(|| InvalidRequestSnafu {
+ region_id,
+ reason: format!(
+ "column {} does not have default value",
+ column.column_schema.name
+ ),
+ })?;
+
+ // Convert default value into proto's value.
+ let proto_value = to_proto_value(default_value).with_context(|| InvalidRequestSnafu {
+ region_id,
+ reason: format!(
+ "no protobuf type for default value of column {} ({:?})",
+ column.column_schema.name, column.column_schema.data_type
+ ),
+ })?;
+
+ // Insert default value to each row.
+ for row in &mut self.rows.rows {
+ row.values.push(proto_value.clone());
+ }
+
+ // Insert column schema.
+ let datatype = to_column_data_type(&column.column_schema.data_type).with_context(|| {
+ InvalidRequestSnafu {
+ region_id,
+ reason: format!(
+ "no protobuf type for column {} ({:?})",
+ column.column_schema.name, column.column_schema.data_type
+ ),
+ }
+ })?;
+ self.rows.schema.push(ColumnSchema {
+ column_name: column.column_schema.name.clone(),
+ datatype: datatype as i32,
+ semantic_type: to_proto_semantic_type(column.semantic_type) as i32,
+ });
+
+ Ok(())
+ }
+}
+
+/// Sender and write request.
+pub(crate) struct SenderWriteRequest {
+ /// Result sender.
+ pub(crate) sender: Option<Sender<Result<()>>>,
+ pub(crate) request: WriteRequest,
}
/// Request sent to a worker
@@ -127,7 +318,6 @@ impl RegionRequest {
/// Body to carry actual region request.
#[derive(Debug)]
pub(crate) enum RequestBody {
- // DML:
/// Write to a region.
Write(WriteRequest),
@@ -151,13 +341,19 @@ impl RequestBody {
}
}
- /// Returns whether the request is a DDL (e.g. CREATE/OPEN/ALTER).
- pub(crate) fn is_ddl(&self) -> bool {
+ /// Returns whether the request is a write request.
+ pub(crate) fn is_write(&self) -> bool {
+ matches!(self, RequestBody::Write(_))
+ }
+
+ /// Converts the request into a [WriteRequest].
+ ///
+ /// # Panics
+ /// Panics if it isn't a [WriteRequest].
+ pub(crate) fn into_write_request(self) -> WriteRequest {
match self {
- RequestBody::Write(_) => false,
- RequestBody::Create(_) => true,
- RequestBody::Open(_) => true,
- RequestBody::Close(_) => true,
+ RequestBody::Write(req) => req,
+ other => panic!("expect write request, found {other:?}"),
}
}
}
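
The `fill_missing_columns`/`fill_column` pair above appends a default value for every region column the request omitted, then extends the request schema to match. A simplified, self-contained sketch of that idea follows; plain structs and integer defaults stand in for the protobuf `Rows`/`ColumnSchema` and the `ColumnMetadata` defaults the real code uses.

use std::collections::HashSet;

#[derive(Debug)]
struct Column {
    name: String,
    default: Option<i64>,
}

#[derive(Debug)]
struct Rows {
    schema: Vec<String>,
    rows: Vec<Vec<i64>>,
}

fn fill_missing_columns(rows: &mut Rows, region_schema: &[Column]) -> Result<(), String> {
    // Owned copy of the column names the request already carries.
    let present: HashSet<String> = rows.schema.iter().cloned().collect();

    for column in region_schema {
        if present.contains(&column.name) {
            continue;
        }
        // A missing column must have a default value, otherwise the request is invalid.
        let default = column
            .default
            .ok_or_else(|| format!("column {} does not have default value", column.name))?;
        // Append the default to every row, then extend the schema to match.
        for row in &mut rows.rows {
            row.push(default);
        }
        rows.schema.push(column.name.clone());
    }
    Ok(())
}

fn main() {
    let region_schema = vec![
        Column { name: "ts".to_string(), default: None },
        Column { name: "value".to_string(), default: Some(0) },
    ];
    let mut rows = Rows {
        schema: vec!["ts".to_string()],
        rows: vec![vec![1], vec![2]],
    };
    fill_missing_columns(&mut rows, &region_schema).expect("defaults filled");
    // schema becomes ["ts", "value"], rows become [[1, 0], [2, 0]]
    println!("{rows:?}");
}
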
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index 874a76d52d2d..dd26f9ac3960 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -17,6 +17,7 @@
mod handle_close;
mod handle_create;
mod handle_open;
+mod handle_write;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
@@ -37,7 +38,7 @@ use crate::config::MitoConfig;
use crate::error::{JoinSnafu, Result, WorkerStoppedSnafu};
use crate::memtable::{DefaultMemtableBuilder, MemtableBuilderRef};
use crate::region::{RegionMap, RegionMapRef};
-use crate::request::{RegionRequest, RequestBody, WorkerRequest};
+use crate::request::{RegionRequest, RequestBody, SenderWriteRequest, WorkerRequest};
/// Identifier for a worker.
pub(crate) type WorkerId = u32;
@@ -322,15 +323,18 @@ impl<S> RegionWorkerLoop<S> {
///
/// `buffer` should be empty.
async fn handle_requests(&mut self, buffer: &mut RequestBuffer) {
- let mut dml_requests = Vec::with_capacity(buffer.len());
+ let mut write_requests = Vec::with_capacity(buffer.len());
let mut ddl_requests = Vec::with_capacity(buffer.len());
for worker_req in buffer.drain(..) {
match worker_req {
WorkerRequest::Region(req) => {
- if req.body.is_ddl() {
- ddl_requests.push(req);
+ if req.body.is_write() {
+ write_requests.push(SenderWriteRequest {
+ sender: req.sender,
+ request: req.body.into_write_request(),
+ });
} else {
- dml_requests.push(req);
+ ddl_requests.push(req);
}
}
// We receive a stop signal, but we still want to process remaining
@@ -342,24 +346,13 @@ impl<S> RegionWorkerLoop<S> {
}
}
- // Handles all dml requests first. So we can alter regions without
- // considering existing dml requests.
- self.handle_dml_requests(dml_requests).await;
+ // Handles all write requests first. So we can alter regions without
+ // considering existing write requests.
+ self.handle_write_requests(write_requests).await;
self.handle_ddl_requests(ddl_requests).await;
}
- /// Takes and handles all dml requests.
- async fn handle_dml_requests(&mut self, write_requests: Vec<RegionRequest>) {
- if write_requests.is_empty() {
- return;
- }
-
- // Create a write context that holds meta and sequence.
-
- unimplemented!()
- }
-
/// Takes and handles all ddl requests.
async fn handle_ddl_requests(&mut self, ddl_requests: Vec<RegionRequest>) {
if ddl_requests.is_empty() {
diff --git a/src/mito2/src/worker/handle_write.rs b/src/mito2/src/worker/handle_write.rs
new file mode 100644
index 000000000000..09da51716e8c
--- /dev/null
+++ b/src/mito2/src/worker/handle_write.rs
@@ -0,0 +1,118 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Handling write requests.
+
+use std::collections::{hash_map, HashMap};
+
+use greptime_proto::v1::mito::Mutation;
+use tokio::sync::oneshot::Sender;
+
+use crate::error::{RegionNotFoundSnafu, Result};
+use crate::proto_util::to_proto_op_type;
+use crate::region::version::VersionRef;
+use crate::region::MitoRegionRef;
+use crate::request::SenderWriteRequest;
+use crate::worker::RegionWorkerLoop;
+
+impl<S> RegionWorkerLoop<S> {
+ /// Takes and handles all write requests.
+ pub(crate) async fn handle_write_requests(&mut self, write_requests: Vec<SenderWriteRequest>) {
+ if write_requests.is_empty() {
+ return;
+ }
+
+ let mut region_ctxs = HashMap::new();
+ for sender_req in write_requests {
+ let region_id = sender_req.request.region_id;
+ // Checks whether the region exists.
+ if let hash_map::Entry::Vacant(e) = region_ctxs.entry(region_id) {
+ let Some(region) = self.regions.get_region(region_id) else {
+ // No such region.
+ send_result(sender_req.sender, RegionNotFoundSnafu {
+ region_id,
+ }.fail());
+
+ continue;
+ };
+
+ // Initialize the context.
+ e.insert(RegionWriteCtx::new(region));
+ }
+
+ // Safety: Now we ensure the region exists.
+            let region_ctx = region_ctxs.get_mut(&region_id).unwrap();
+
+ // Checks whether request schema is compatible with region schema.
+ if let Err(e) = sender_req
+ .request
+                .check_schema(&region_ctx.version.metadata)
+ {
+ send_result(sender_req.sender, Err(e));
+
+ continue;
+ }
+
+ // Collect requests by region.
+ region_ctx.push_sender_request(sender_req);
+ }
+
+ todo!()
+ }
+}
+
+/// Send result to the request.
+fn send_result(sender: Option<Sender<Result<()>>>, res: Result<()>) {
+ if let Some(sender) = sender {
+ // Ignore send result.
+ let _ = sender.send(res);
+ }
+}
+
+/// Context to keep region metadata and buffer write requests.
+struct RegionWriteCtx {
+ /// Region to write.
+ region: MitoRegionRef,
+ /// Version of the region while creating the context.
+ version: VersionRef,
+ /// Valid mutations.
+ mutations: Vec<Mutation>,
+ /// Result senders.
+ ///
+ /// The sender is 1:1 map to the mutation in `mutations`.
+ senders: Vec<Option<Sender<Result<()>>>>,
+}
+
+impl RegionWriteCtx {
+ /// Returns an empty context.
+ fn new(region: MitoRegionRef) -> RegionWriteCtx {
+ let version = region.version();
+ RegionWriteCtx {
+ region,
+ version,
+ mutations: Vec::new(),
+ senders: Vec::new(),
+ }
+ }
+
+ /// Push [SenderWriteRequest] to the context.
+ fn push_sender_request(&mut self, sender_req: SenderWriteRequest) {
+ self.mutations.push(Mutation {
+ op_type: to_proto_op_type(sender_req.request.op_type) as i32,
+ sequence: 0, // TODO(yingwen): Set sequence.
+ rows: Some(sender_req.request.rows),
+ });
+ self.senders.push(sender_req.sender);
+ }
+}
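
`handle_write_requests` above groups incoming requests by region with the `HashMap` entry API, so a context is created at most once per region and a missing region can be rejected before anything is buffered. A stripped-down, runnable sketch of that grouping pattern, with region ids and requests reduced to plain types:

use std::collections::{hash_map, HashMap};

#[derive(Default, Debug)]
struct RegionWriteCtx {
    requests: Vec<String>,
}

fn group_by_region(requests: Vec<(u64, String)>) -> HashMap<u64, RegionWriteCtx> {
    let mut region_ctxs: HashMap<u64, RegionWriteCtx> = HashMap::new();
    for (region_id, request) in requests {
        // First request for this region: create its context. The real code uses this
        // branch to bail out (send an error) when the region does not exist at all.
        if let hash_map::Entry::Vacant(e) = region_ctxs.entry(region_id) {
            e.insert(RegionWriteCtx::default());
        }
        // The entry handling above guarantees the context exists now.
        region_ctxs
            .get_mut(&region_id)
            .unwrap()
            .requests
            .push(request);
    }
    region_ctxs
}

fn main() {
    let grouped = group_by_region(vec![
        (1, "put a".to_string()),
        (2, "put b".to_string()),
        (1, "delete a".to_string()),
    ]);
    println!("{grouped:?}");
}
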
|
feat
|
preparation to implementing write (#2085)
|
a61c0bd1d8a16b208266bbac92f3b8fe2cbdf594
|
2024-10-11 15:32:45
|
Ruihang Xia
|
fix: error in admin function is not formatted properly (#4820)
| false
|
diff --git a/src/operator/src/statement/admin.rs b/src/operator/src/statement/admin.rs
index 37e6acf9665f..6c0413c2aa2a 100644
--- a/src/operator/src/statement/admin.rs
+++ b/src/operator/src/statement/admin.rs
@@ -59,7 +59,7 @@ impl StatementExecutor {
.map(|arg| {
let FunctionArg::Unnamed(FunctionArgExpr::Expr(Expr::Value(value))) = arg else {
return error::BuildAdminFunctionArgsSnafu {
- msg: "unsupported function arg {arg}",
+ msg: format!("unsupported function arg {arg}"),
}
.fail();
};
@@ -200,7 +200,7 @@ fn values_to_vectors_by_valid_types(
}
error::BuildAdminFunctionArgsSnafu {
- msg: "failed to cast {value}",
+ msg: format!("failed to cast {value}"),
}
.fail()
})
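
The fix above matters because a plain string literal is never interpolated: only `format!`-family macros expand `{arg}`. The snafu error builder accepts string-like values, which is why the literal version compiled but carried the placeholder verbatim. A minimal illustration:

fn main() {
    let arg = "COUNT(*)";

    // Before the fix: the placeholder stays as literal text.
    let broken = "unsupported function arg {arg}".to_string();
    // After the fix: format! interpolates the captured variable.
    let fixed = format!("unsupported function arg {arg}");

    assert_eq!(broken, "unsupported function arg {arg}");
    assert_eq!(fixed, "unsupported function arg COUNT(*)");
}
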
|
fix
|
error in admin function is not formatted properly (#4820)
|
554a69ea54333cd07a7adf4d22d8cef91e2ccd97
|
2023-04-07 14:15:25
|
zyy17
|
refactor: add disable_dashboard option and disable dashboard in metasrv and datanode (#1343)
| false
|
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index 796bbe75a07d..e992e3fd2d4a 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -168,6 +168,9 @@ impl TryFrom<StartCommand> for DatanodeOptions {
opts.http_opts.timeout = Duration::from_secs(http_timeout)
}
+ // Disable dashboard in datanode.
+ opts.http_opts.disable_dashboard = true;
+
Ok(opts)
}
}
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index bf16a91ad0d8..3cbdb8dd5214 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -107,6 +107,8 @@ pub struct StartCommand {
tls_key_path: Option<String>,
#[clap(long)]
user_provider: Option<String>,
+ #[clap(long)]
+ disable_dashboard: bool,
}
impl StartCommand {
@@ -149,18 +151,24 @@ impl TryFrom<StartCommand> for FrontendOptions {
let tls_option = TlsOption::new(cmd.tls_mode, cmd.tls_cert_path, cmd.tls_key_path);
+ let mut http_options = HttpOptions {
+ disable_dashboard: cmd.disable_dashboard,
+ ..Default::default()
+ };
+
if let Some(addr) = cmd.http_addr {
- opts.http_options = Some(HttpOptions {
- addr,
- ..Default::default()
- });
+ http_options.addr = addr;
}
+
+ opts.http_options = Some(http_options);
+
if let Some(addr) = cmd.grpc_addr {
opts.grpc_options = Some(GrpcOptions {
addr,
..Default::default()
});
}
+
if let Some(addr) = cmd.mysql_addr {
opts.mysql_options = Some(MysqlOptions {
addr,
@@ -227,6 +235,7 @@ mod tests {
tls_cert_path: None,
tls_key_path: None,
user_provider: None,
+ disable_dashboard: false,
};
let opts: FrontendOptions = command.try_into().unwrap();
@@ -289,6 +298,7 @@ mod tests {
tls_cert_path: None,
tls_key_path: None,
user_provider: None,
+ disable_dashboard: false,
};
let fe_opts = FrontendOptions::try_from(command).unwrap();
@@ -319,6 +329,7 @@ mod tests {
tls_cert_path: None,
tls_key_path: None,
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
+ disable_dashboard: false,
};
let plugins = load_frontend_plugins(&command.user_provider);
diff --git a/src/cmd/src/metasrv.rs b/src/cmd/src/metasrv.rs
index d9170066bc70..6bc71fe7fa40 100644
--- a/src/cmd/src/metasrv.rs
+++ b/src/cmd/src/metasrv.rs
@@ -141,6 +141,9 @@ impl TryFrom<StartCommand> for MetaSrvOptions {
opts.http_opts.timeout = Duration::from_secs(http_timeout);
}
+ // Disable dashboard in metasrv.
+ opts.http_opts.disable_dashboard = true;
+
Ok(opts)
}
}
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index aae858b59ad5..4e5fad7c5228 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -118,8 +118,12 @@ pub struct HttpServer {
#[serde(default)]
pub struct HttpOptions {
pub addr: String,
+
#[serde(with = "humantime_serde")]
pub timeout: Duration,
+
+ #[serde(skip)]
+ pub disable_dashboard: bool,
}
impl Default for HttpOptions {
@@ -127,6 +131,7 @@ impl Default for HttpOptions {
Self {
addr: "127.0.0.1:4000".to_string(),
timeout: Duration::from_secs(30),
+ disable_dashboard: false,
}
}
}
@@ -502,7 +507,10 @@ impl HttpServer {
#[cfg(feature = "dashboard")]
{
- router = router.nest("/dashboard", dashboard::dashboard());
+ if !self.options.disable_dashboard {
+ info!("Enable dashboard service at '/dashboard'");
+ router = router.nest("/dashboard", dashboard::dashboard());
+ }
}
router
|
refactor
|
add disable_dashboard option and disable dashboard in metasrv and datanode (#1343)
|
353c8230db0c429a5512c3dad2d73c65455de173
|
2025-01-03 16:29:58
|
discord9
|
refactor: flow replace check&better error msg (#5277)
| false
|
diff --git a/src/flow/src/adapter.rs b/src/flow/src/adapter.rs
index a62799164d56..085096fd9511 100644
--- a/src/flow/src/adapter.rs
+++ b/src/flow/src/adapter.rs
@@ -50,10 +50,7 @@ use crate::adapter::util::relation_desc_to_column_schemas_with_fallback;
use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
use crate::compute::ErrCollector;
use crate::df_optimizer::sql_to_flow_plan;
-use crate::error::{
- EvalSnafu, ExternalSnafu, FlowAlreadyExistSnafu, InternalSnafu, InvalidQuerySnafu,
- UnexpectedSnafu,
-};
+use crate::error::{EvalSnafu, ExternalSnafu, InternalSnafu, InvalidQuerySnafu, UnexpectedSnafu};
use crate::expr::Batch;
use crate::metrics::{METRIC_FLOW_INSERT_ELAPSED, METRIC_FLOW_ROWS, METRIC_FLOW_RUN_INTERVAL_MS};
use crate::repr::{self, DiffRow, RelationDesc, Row, BATCH_SIZE};
@@ -727,43 +724,6 @@ impl FlowWorkerManager {
query_ctx,
} = args;
- let already_exist = {
- let mut flag = false;
-
- // check if the task already exists
- for handle in self.worker_handles.iter() {
- if handle.lock().await.contains_flow(flow_id).await? {
- flag = true;
- break;
- }
- }
- flag
- };
- match (create_if_not_exists, or_replace, already_exist) {
- // do replace
- (_, true, true) => {
- info!("Replacing flow with id={}", flow_id);
- self.remove_flow(flow_id).await?;
- }
- (false, false, true) => FlowAlreadyExistSnafu { id: flow_id }.fail()?,
- // do nothing if exists
- (true, false, true) => {
- info!("Flow with id={} already exists, do nothing", flow_id);
- return Ok(None);
- }
- // create if not exists
- (_, _, false) => (),
- }
-
- if create_if_not_exists {
- // check if the task already exists
- for handle in self.worker_handles.iter() {
- if handle.lock().await.contains_flow(flow_id).await? {
- return Ok(None);
- }
- }
- }
-
let mut node_ctx = self.node_context.write().await;
// assign global id to source and sink table
for source in &source_table_ids {
@@ -877,9 +837,11 @@ impl FlowWorkerManager {
source_ids,
src_recvs: source_receivers,
expire_after,
+ or_replace,
create_if_not_exists,
err_collector,
};
+
handle.create_flow(create_request).await?;
info!("Successfully create flow with id={}", flow_id);
Ok(Some(flow_id))
diff --git a/src/flow/src/adapter/flownode_impl.rs b/src/flow/src/adapter/flownode_impl.rs
index 826b527fe868..f46f0d544021 100644
--- a/src/flow/src/adapter/flownode_impl.rs
+++ b/src/flow/src/adapter/flownode_impl.rs
@@ -25,11 +25,11 @@ use common_meta::error::{ExternalSnafu, Result, UnexpectedSnafu};
use common_meta::node_manager::Flownode;
use common_telemetry::{debug, trace};
use itertools::Itertools;
-use snafu::{OptionExt, ResultExt};
+use snafu::{IntoError, OptionExt, ResultExt};
use store_api::storage::RegionId;
use crate::adapter::{CreateFlowArgs, FlowWorkerManager};
-use crate::error::InternalSnafu;
+use crate::error::{CreateFlowSnafu, InsertIntoFlowSnafu, InternalSnafu};
use crate::metrics::METRIC_FLOW_TASK_COUNT;
use crate::repr::{self, DiffRow};
@@ -79,13 +79,15 @@ impl Flownode for FlowWorkerManager {
or_replace,
expire_after,
comment: Some(comment),
- sql,
+ sql: sql.clone(),
flow_options,
query_ctx,
};
let ret = self
.create_flow(args)
.await
+ .map_err(BoxedError::new)
+ .with_context(|_| CreateFlowSnafu { sql: sql.clone() })
.map_err(to_meta_err(snafu::location!()))?;
METRIC_FLOW_TASK_COUNT.inc();
Ok(FlowResponse {
@@ -229,13 +231,29 @@ impl Flownode for FlowWorkerManager {
})
.map(|r| (r, now, 1))
.collect_vec();
-
- self.handle_write_request(region_id.into(), rows, &table_types)
+ if let Err(err) = self
+ .handle_write_request(region_id.into(), rows, &table_types)
.await
- .map_err(|err| {
- common_telemetry::error!(err;"Failed to handle write request");
- to_meta_err(snafu::location!())(err)
- })?;
+ {
+ let err = BoxedError::new(err);
+ let flow_ids = self
+ .node_context
+ .read()
+ .await
+ .get_flow_ids(table_id)
+ .into_iter()
+ .flatten()
+ .cloned()
+ .collect_vec();
+ let err = InsertIntoFlowSnafu {
+ region_id,
+ flow_ids,
+ }
+ .into_error(err);
+ common_telemetry::error!(err; "Failed to handle write request");
+ let err = to_meta_err(snafu::location!())(err);
+ return Err(err);
+ }
}
Ok(Default::default())
}
diff --git a/src/flow/src/adapter/node_context.rs b/src/flow/src/adapter/node_context.rs
index 02612b6f5a2d..1896534e8657 100644
--- a/src/flow/src/adapter/node_context.rs
+++ b/src/flow/src/adapter/node_context.rs
@@ -71,6 +71,10 @@ impl FlownodeContext {
query_context: Default::default(),
}
}
+
+ pub fn get_flow_ids(&self, table_id: TableId) -> Option<&BTreeSet<FlowId>> {
+ self.source_to_tasks.get(&table_id)
+ }
}
/// a simple broadcast sender with backpressure, bounded capacity and blocking on send when send buf is full
diff --git a/src/flow/src/adapter/worker.rs b/src/flow/src/adapter/worker.rs
index 4a6b0ba963d9..46b4c0669a8b 100644
--- a/src/flow/src/adapter/worker.rs
+++ b/src/flow/src/adapter/worker.rs
@@ -247,15 +247,25 @@ impl<'s> Worker<'s> {
src_recvs: Vec<broadcast::Receiver<Batch>>,
// TODO(discord9): set expire duration for all arrangement and compare to sys timestamp instead
expire_after: Option<repr::Duration>,
+ or_replace: bool,
create_if_not_exists: bool,
err_collector: ErrCollector,
) -> Result<Option<FlowId>, Error> {
- let already_exists = self.task_states.contains_key(&flow_id);
- match (already_exists, create_if_not_exists) {
- (true, true) => return Ok(None),
- (true, false) => FlowAlreadyExistSnafu { id: flow_id }.fail()?,
- (false, _) => (),
- };
+ let already_exist = self.task_states.contains_key(&flow_id);
+ match (create_if_not_exists, or_replace, already_exist) {
+ // if replace, ignore that old flow exists
+ (_, true, true) => {
+ info!("Replacing flow with id={}", flow_id);
+ }
+ (false, false, true) => FlowAlreadyExistSnafu { id: flow_id }.fail()?,
+ // already exists, and not replace, return None
+ (true, false, true) => {
+ info!("Flow with id={} already exists, do nothing", flow_id);
+ return Ok(None);
+ }
+ // continue as normal
+ (_, _, false) => (),
+ }
let mut cur_task_state = ActiveDataflowState::<'s> {
err_collector,
@@ -341,6 +351,7 @@ impl<'s> Worker<'s> {
source_ids,
src_recvs,
expire_after,
+ or_replace,
create_if_not_exists,
err_collector,
} => {
@@ -352,6 +363,7 @@ impl<'s> Worker<'s> {
&source_ids,
src_recvs,
expire_after,
+ or_replace,
create_if_not_exists,
err_collector,
);
@@ -398,6 +410,7 @@ pub enum Request {
source_ids: Vec<GlobalId>,
src_recvs: Vec<broadcast::Receiver<Batch>>,
expire_after: Option<repr::Duration>,
+ or_replace: bool,
create_if_not_exists: bool,
err_collector: ErrCollector,
},
@@ -547,6 +560,7 @@ mod test {
source_ids: src_ids,
src_recvs: vec![rx],
expire_after: None,
+ or_replace: false,
create_if_not_exists: true,
err_collector: ErrCollector::default(),
};
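
The triple match in `Worker::create_flow` above is the heart of this refactor: `or_replace` takes precedence when the flow already exists, `create_if_not_exists` turns the duplicate into a no-op, and a plain create on an existing flow still fails. A compact, self-contained restatement of that decision table; the outcome names are illustrative, not the crate's types.

#[derive(Debug, PartialEq)]
enum Outcome {
    Replace,
    AlreadyExistsError,
    DoNothing,
    Create,
}

fn decide(create_if_not_exists: bool, or_replace: bool, already_exist: bool) -> Outcome {
    match (create_if_not_exists, or_replace, already_exist) {
        // OR REPLACE wins whenever the flow already exists.
        (_, true, true) => Outcome::Replace,
        // Plain CREATE on an existing flow is an error.
        (false, false, true) => Outcome::AlreadyExistsError,
        // CREATE IF NOT EXISTS on an existing flow is a no-op.
        (true, false, true) => Outcome::DoNothing,
        // The flow does not exist yet: create it.
        (_, _, false) => Outcome::Create,
    }
}

fn main() {
    assert_eq!(decide(false, true, true), Outcome::Replace);
    assert_eq!(decide(false, false, true), Outcome::AlreadyExistsError);
    assert_eq!(decide(true, false, true), Outcome::DoNothing);
    assert_eq!(decide(true, false, false), Outcome::Create);
}
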
diff --git a/src/flow/src/error.rs b/src/flow/src/error.rs
index 137e024307f9..0374e0050637 100644
--- a/src/flow/src/error.rs
+++ b/src/flow/src/error.rs
@@ -32,6 +32,27 @@ use crate::expr::EvalError;
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
+ #[snafu(display(
+ "Failed to insert into flow: region_id={}, flow_ids={:?}",
+ region_id,
+ flow_ids
+ ))]
+ InsertIntoFlow {
+ region_id: u64,
+ flow_ids: Vec<u64>,
+ source: BoxedError,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Error encountered while creating flow: {sql}"))]
+ CreateFlow {
+ sql: String,
+ source: BoxedError,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("External error"))]
External {
source: BoxedError,
@@ -207,16 +228,17 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
- Self::Eval { .. } | Self::JoinTask { .. } | Self::Datafusion { .. } => {
- StatusCode::Internal
- }
+ Self::Eval { .. }
+ | Self::JoinTask { .. }
+ | Self::Datafusion { .. }
+ | Self::InsertIntoFlow { .. } => StatusCode::Internal,
Self::FlowAlreadyExist { .. } => StatusCode::TableAlreadyExists,
Self::TableNotFound { .. }
| Self::TableNotFoundMeta { .. }
| Self::FlowNotFound { .. }
| Self::ListFlows { .. } => StatusCode::TableNotFound,
Self::Plan { .. } | Self::Datatypes { .. } => StatusCode::PlanQuery,
- Self::InvalidQuery { .. } => StatusCode::EngineExecuteQuery,
+ Self::InvalidQuery { .. } | Self::CreateFlow { .. } => StatusCode::EngineExecuteQuery,
Self::Unexpected { .. } => StatusCode::Unexpected,
Self::NotImplemented { .. } | Self::UnsupportedTemporalFilter { .. } => {
StatusCode::Unsupported
diff --git a/src/flow/src/server.rs b/src/flow/src/server.rs
index d22ba220441b..3ed8b7efa959 100644
--- a/src/flow/src/server.rs
+++ b/src/flow/src/server.rs
@@ -50,8 +50,8 @@ use tonic::{Request, Response, Status};
use crate::adapter::{CreateFlowArgs, FlowWorkerManagerRef};
use crate::error::{
- to_status_with_last_err, CacheRequiredSnafu, ExternalSnafu, FlowNotFoundSnafu, ListFlowsSnafu,
- ParseAddrSnafu, ShutdownServerSnafu, StartServerSnafu, UnexpectedSnafu,
+ to_status_with_last_err, CacheRequiredSnafu, CreateFlowSnafu, ExternalSnafu, FlowNotFoundSnafu,
+ ListFlowsSnafu, ParseAddrSnafu, ShutdownServerSnafu, StartServerSnafu, UnexpectedSnafu,
};
use crate::heartbeat::HeartbeatTask;
use crate::metrics::{METRIC_FLOW_PROCESSING_TIME, METRIC_FLOW_ROWS};
@@ -392,7 +392,13 @@ impl FlownodeBuilder {
.build(),
),
};
- manager.create_flow(args).await?;
+ manager
+ .create_flow(args)
+ .await
+ .map_err(BoxedError::new)
+ .with_context(|_| CreateFlowSnafu {
+ sql: info.raw_sql().clone(),
+ })?;
}
Ok(cnt)
diff --git a/tests/cases/standalone/common/alter/change_col_fulltext_options.result b/tests/cases/standalone/common/alter/change_col_fulltext_options.result
index 46bd510462f5..fa0443293c01 100644
--- a/tests/cases/standalone/common/alter/change_col_fulltext_options.result
+++ b/tests/cases/standalone/common/alter/change_col_fulltext_options.result
@@ -32,7 +32,7 @@ INSERT INTO test VALUES ('hello', '2020-01-01 00:00:00'),
Affected Rows: 4
-SELECT * FROM test WHERE MATCHES(message, 'hello');
+SELECT * FROM test WHERE MATCHES(message, 'hello') ORDER BY message;
+-------------+---------------------+
| message | time |
@@ -46,7 +46,7 @@ ALTER TABLE test MODIFY COLUMN message SET FULLTEXT WITH(analyzer = 'Chinese', c
Affected Rows: 0
-SELECT * FROM test WHERE MATCHES(message, 'hello');
+SELECT * FROM test WHERE MATCHES(message, 'hello') ORDER BY message;
+-------------+---------------------+
| message | time |
@@ -63,15 +63,15 @@ INSERT INTO test VALUES ('hello NiKo', '2020-01-03 00:00:00'),
Affected Rows: 4
-SELECT * FROM test WHERE MATCHES(message, 'hello');
+SELECT * FROM test WHERE MATCHES(message, 'hello') ORDER BY message;
+-------------+---------------------+
| message | time |
+-------------+---------------------+
-| hello NiKo | 2020-01-03T00:00:00 |
| NiKo hello | 2020-01-03T00:00:01 |
-| hello hello | 2020-01-04T00:00:00 |
| hello | 2020-01-01T00:00:00 |
+| hello NiKo | 2020-01-03T00:00:00 |
+| hello hello | 2020-01-04T00:00:00 |
| hello world | 2020-01-02T00:00:00 |
| world hello | 2020-01-02T00:00:01 |
+-------------+---------------------+
diff --git a/tests/cases/standalone/common/alter/change_col_fulltext_options.sql b/tests/cases/standalone/common/alter/change_col_fulltext_options.sql
index 88c8b3b1800f..6197ba1dd106 100644
--- a/tests/cases/standalone/common/alter/change_col_fulltext_options.sql
+++ b/tests/cases/standalone/common/alter/change_col_fulltext_options.sql
@@ -13,18 +13,18 @@ INSERT INTO test VALUES ('hello', '2020-01-01 00:00:00'),
('hello world', '2020-01-02 00:00:00'),
('world hello', '2020-01-02 00:00:01');
-SELECT * FROM test WHERE MATCHES(message, 'hello');
+SELECT * FROM test WHERE MATCHES(message, 'hello') ORDER BY message;
ALTER TABLE test MODIFY COLUMN message SET FULLTEXT WITH(analyzer = 'Chinese', case_sensitive = 'true');
-SELECT * FROM test WHERE MATCHES(message, 'hello');
+SELECT * FROM test WHERE MATCHES(message, 'hello') ORDER BY message;
INSERT INTO test VALUES ('hello NiKo', '2020-01-03 00:00:00'),
('NiKo hello', '2020-01-03 00:00:01'),
('hello hello', '2020-01-04 00:00:00'),
('NiKo, NiKo', '2020-01-04 00:00:01');
-SELECT * FROM test WHERE MATCHES(message, 'hello');
+SELECT * FROM test WHERE MATCHES(message, 'hello') ORDER BY message;
-- SQLNESS ARG restart=true
SHOW CREATE TABLE test;
diff --git a/tests/cases/standalone/common/flow/flow_more_usecase.result b/tests/cases/standalone/common/flow/flow_more_usecase.result
new file mode 100644
index 000000000000..304589f6a552
--- /dev/null
+++ b/tests/cases/standalone/common/flow/flow_more_usecase.result
@@ -0,0 +1,56 @@
+CREATE TABLE `api_requests` (
+ `timestamp` TIMESTAMP NOT NULL,
+ `request_id` STRING NOT NULL,
+ `upstream_id` STRING NOT NULL,
+ `application_id` STRING NULL,
+ `url` STRING NOT NULL,
+ `method` STRING NOT NULL,
+ `status_code` INTEGER NOT NULL,
+ `request_headers` JSON NULL,
+ `request_body` STRING NULL,
+ `response_headers` JSON NULL,
+ `response_body` STRING NULL,
+ `latency_ms` INTEGER NOT NULL,
+ `client_ip` STRING NULL,
+ `user_agent` STRING NULL,
+ TIME INDEX (`timestamp`)
+)
+WITH(
+ append_mode = 'true'
+);
+
+Affected Rows: 0
+
+CREATE TABLE api_request_volume_upstream_stats (
+ `upstream_id` STRING NOT NULL,
+ `time_window` TIMESTAMP NOT NULL,
+ `request_count` BIGINT NOT NULL,
+ TIME INDEX (`time_window`)
+);
+
+Affected Rows: 0
+
+CREATE FLOW api_request_volume_by_upstream
+SINK TO api_request_volume_upstream_stats
+AS
+SELECT
+ upstream_id,
+ date_bin(INTERVAL '1 hour', timestamp, '2024-01-01 00:00:00'::TimestampNanosecond) AS time_window,
+ COUNT(*) AS request_count
+FROM api_requests
+GROUP BY upstream_id, time_window;
+
+Affected Rows: 0
+
+DROP FLOW api_request_volume_by_upstream;
+
+Affected Rows: 0
+
+DROP TABLE api_request_volume_upstream_stats;
+
+Affected Rows: 0
+
+DROP TABLE api_requests;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/flow/flow_more_usecase.sql b/tests/cases/standalone/common/flow/flow_more_usecase.sql
new file mode 100644
index 000000000000..fb5dc12d5457
--- /dev/null
+++ b/tests/cases/standalone/common/flow/flow_more_usecase.sql
@@ -0,0 +1,41 @@
+CREATE TABLE `api_requests` (
+ `timestamp` TIMESTAMP NOT NULL,
+ `request_id` STRING NOT NULL,
+ `upstream_id` STRING NOT NULL,
+ `application_id` STRING NULL,
+ `url` STRING NOT NULL,
+ `method` STRING NOT NULL,
+ `status_code` INTEGER NOT NULL,
+ `request_headers` JSON NULL,
+ `request_body` STRING NULL,
+ `response_headers` JSON NULL,
+ `response_body` STRING NULL,
+ `latency_ms` INTEGER NOT NULL,
+ `client_ip` STRING NULL,
+ `user_agent` STRING NULL,
+ TIME INDEX (`timestamp`)
+)
+WITH(
+ append_mode = 'true'
+);
+
+CREATE TABLE api_request_volume_upstream_stats (
+ `upstream_id` STRING NOT NULL,
+ `time_window` TIMESTAMP NOT NULL,
+ `request_count` BIGINT NOT NULL,
+ TIME INDEX (`time_window`)
+);
+
+CREATE FLOW api_request_volume_by_upstream
+SINK TO api_request_volume_upstream_stats
+AS
+SELECT
+ upstream_id,
+ date_bin(INTERVAL '1 hour', timestamp, '2024-01-01 00:00:00'::TimestampNanosecond) AS time_window,
+ COUNT(*) AS request_count
+FROM api_requests
+GROUP BY upstream_id, time_window;
+
+DROP FLOW api_request_volume_by_upstream;
+DROP TABLE api_request_volume_upstream_stats;
+DROP TABLE api_requests;
diff --git a/tests/cases/standalone/common/flow/show_create_flow.result b/tests/cases/standalone/common/flow/show_create_flow.result
index 14e80129446d..2a177bddee26 100644
--- a/tests/cases/standalone/common/flow/show_create_flow.result
+++ b/tests/cases/standalone/common/flow/show_create_flow.result
@@ -365,33 +365,48 @@ SELECT number FROM out_num_cnt_show;
| 16 |
+--------+
--- should mismatch
+-- should mismatch, hence the old flow remains
CREATE OR REPLACE FLOW filter_numbers_show SINK TO out_num_cnt_show AS SELECT number AS n1, number AS n2 FROM numbers_input_show where number > 15;
Error: 3001(EngineExecuteQuery), Invalid query: Column 1(name is 'ts', flow inferred name is 'n2')'s data type mismatch, expect Timestamp(Millisecond(TimestampMillisecondType)) got Int32(Int32Type)
--- should mismatch
+-- should mismatch, hence the old flow remains
CREATE OR REPLACE FLOW filter_numbers_show SINK TO out_num_cnt_show AS SELECT number AS n1, number AS n2, number AS n3 FROM numbers_input_show where number > 15;
Error: 3001(EngineExecuteQuery), Invalid query: Column 1(name is 'ts', flow inferred name is 'n2')'s data type mismatch, expect Timestamp(Millisecond(TimestampMillisecondType)) got Int32(Int32Type)
-INSERT INTO numbers_input_show VALUES (10, 6),(15, 7),(18, 3);
+SELECT flow_definition FROM INFORMATION_SCHEMA.FLOWS WHERE flow_name='filter_numbers_show';
-Affected Rows: 3
++---------------------------------------------------------------+
+| flow_definition |
++---------------------------------------------------------------+
+| SELECT number AS n1 FROM numbers_input_show WHERE number > 10 |
++---------------------------------------------------------------+
+
+INSERT INTO numbers_input_show VALUES (10, 6),(11, 8),(15, 7),(18, 3);
+
+Affected Rows: 4
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('filter_numbers_show');
-Error: 1003(Internal), Internal error: 1003
++-----------------------------------------+
+| ADMIN FLUSH_FLOW('filter_numbers_show') |
++-----------------------------------------+
+| FLOW_FLUSHED |
++-----------------------------------------+
--- sink table stays the same since the flow error out due to column mismatch
+-- sink table shows new 11 since old flow remains
SELECT number FROM out_num_cnt_show;
+--------+
| number |
+--------+
+| 11 |
+| 15 |
| 15 |
| 16 |
+| 18 |
+--------+
DROP FLOW filter_numbers_show;
diff --git a/tests/cases/standalone/common/flow/show_create_flow.sql b/tests/cases/standalone/common/flow/show_create_flow.sql
index f445c4f254c4..0c4110c19629 100644
--- a/tests/cases/standalone/common/flow/show_create_flow.sql
+++ b/tests/cases/standalone/common/flow/show_create_flow.sql
@@ -147,19 +147,20 @@ ADMIN FLUSH_FLOW('filter_numbers_show');
SELECT number FROM out_num_cnt_show;
-
--- should mismatch
+-- should mismatch, hence the old flow remains
CREATE OR REPLACE FLOW filter_numbers_show SINK TO out_num_cnt_show AS SELECT number AS n1, number AS n2 FROM numbers_input_show where number > 15;
--- should mismatch
+-- should mismatch, hence the old flow remains
CREATE OR REPLACE FLOW filter_numbers_show SINK TO out_num_cnt_show AS SELECT number AS n1, number AS n2, number AS n3 FROM numbers_input_show where number > 15;
-INSERT INTO numbers_input_show VALUES (10, 6),(15, 7),(18, 3);
+SELECT flow_definition FROM INFORMATION_SCHEMA.FLOWS WHERE flow_name='filter_numbers_show';
+
+INSERT INTO numbers_input_show VALUES (10, 6),(11, 8),(15, 7),(18, 3);
-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED |
ADMIN FLUSH_FLOW('filter_numbers_show');
--- sink table stays the same since the flow error out due to column mismatch
+-- sink table shows new 11 since old flow remains
SELECT number FROM out_num_cnt_show;
DROP FLOW filter_numbers_show;
|
refactor
|
flow replace check&better error msg (#5277)
|
cc8d6b1200904d56f45aacf2a52736f19186db3d
|
2024-05-08 05:02:22
|
zyy17
|
refactor: move `Plugins` to `plugins.rs` and use rwlock (#3862)
| false
|
diff --git a/src/common/base/src/lib.rs b/src/common/base/src/lib.rs
index d4e1454e9d72..539da1ba8cef 100644
--- a/src/common/base/src/lib.rs
+++ b/src/common/base/src/lib.rs
@@ -15,68 +15,12 @@
pub mod bit_vec;
pub mod buffer;
pub mod bytes;
+pub mod plugins;
#[allow(clippy::all)]
pub mod readable_size;
pub mod secrets;
-use core::any::Any;
-use std::sync::{Arc, Mutex, MutexGuard};
-
pub type AffectedRows = usize;
pub use bit_vec::BitVec;
-
-/// [`Plugins`] is a wrapper of Arc contents.
-/// Make it Cloneable and we can treat it like an Arc struct.
-#[derive(Default, Clone)]
-pub struct Plugins {
- inner: Arc<Mutex<anymap::Map<dyn Any + Send + Sync>>>,
-}
-
-impl Plugins {
- pub fn new() -> Self {
- Self {
- inner: Arc::new(Mutex::new(anymap::Map::new())),
- }
- }
-
- fn lock(&self) -> MutexGuard<anymap::Map<dyn Any + Send + Sync>> {
- self.inner.lock().unwrap()
- }
-
- pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
- let _ = self.lock().insert(value);
- }
-
- pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
- let binding = self.lock();
- binding.get::<T>().cloned()
- }
-
- pub fn map_mut<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> R
- where
- F: FnOnce(Option<&mut T>) -> R,
- {
- let mut binding = self.lock();
- let opt = binding.get_mut::<T>();
- mapper(opt)
- }
-
- pub fn map<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> Option<R>
- where
- F: FnOnce(&T) -> R,
- {
- let binding = self.lock();
- binding.get::<T>().map(mapper)
- }
-
- pub fn len(&self) -> usize {
- let binding = self.lock();
- binding.len()
- }
-
- pub fn is_empty(&self) -> bool {
- let binding = self.lock();
- binding.is_empty()
- }
-}
+pub use plugins::Plugins;
diff --git a/src/common/base/src/plugins.rs b/src/common/base/src/plugins.rs
new file mode 100644
index 000000000000..84d78b0c91aa
--- /dev/null
+++ b/src/common/base/src/plugins.rs
@@ -0,0 +1,127 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard};
+
+/// [`Plugins`] is a wrapper of [AnyMap](https://github.com/chris-morgan/anymap) and provides a thread-safe way to store and retrieve plugins.
+/// It is cloneable, so it can be treated like an `Arc`-wrapped struct.
+#[derive(Default, Clone)]
+pub struct Plugins {
+ inner: Arc<RwLock<anymap::Map<dyn Any + Send + Sync>>>,
+}
+
+impl Plugins {
+ pub fn new() -> Self {
+ Self {
+ inner: Arc::new(RwLock::new(anymap::Map::new())),
+ }
+ }
+
+ pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
+ let _ = self.write().insert(value);
+ }
+
+ pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
+ self.read().get::<T>().cloned()
+ }
+
+ pub fn map_mut<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> R
+ where
+ F: FnOnce(Option<&mut T>) -> R,
+ {
+ let mut binding = self.write();
+ let opt = binding.get_mut::<T>();
+ mapper(opt)
+ }
+
+ pub fn map<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> Option<R>
+ where
+ F: FnOnce(&T) -> R,
+ {
+ self.read().get::<T>().map(mapper)
+ }
+
+ pub fn len(&self) -> usize {
+ self.read().len()
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.read().is_empty()
+ }
+
+ fn read(&self) -> RwLockReadGuard<anymap::Map<dyn Any + Send + Sync>> {
+ self.inner.read().unwrap()
+ }
+
+ fn write(&self) -> RwLockWriteGuard<anymap::Map<dyn Any + Send + Sync>> {
+ self.inner.write().unwrap()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_plugins() {
+ #[derive(Debug, Clone)]
+ struct FooPlugin {
+ x: i32,
+ }
+
+ #[derive(Debug, Clone)]
+ struct BarPlugin {
+ y: String,
+ }
+
+ let plugins = Plugins::new();
+
+ let m = plugins.clone();
+ let thread1 = std::thread::spawn(move || {
+ m.insert(FooPlugin { x: 42 });
+
+ if let Some(foo) = m.get::<FooPlugin>() {
+ assert_eq!(foo.x, 42);
+ }
+
+ assert_eq!(m.map::<FooPlugin, _, _>(|foo| foo.x * 2), Some(84));
+ });
+
+ let m = plugins.clone();
+ let thread2 = std::thread::spawn(move || {
+ m.clone().insert(BarPlugin {
+ y: "hello".to_string(),
+ });
+
+ if let Some(bar) = m.get::<BarPlugin>() {
+ assert_eq!(bar.y, "hello");
+ }
+
+ m.map_mut::<BarPlugin, _, _>(|bar| {
+ if let Some(bar) = bar {
+ bar.y = "world".to_string();
+ }
+ });
+
+ assert_eq!(m.get::<BarPlugin>().unwrap().y, "world");
+ });
+
+ thread1.join().unwrap();
+ thread2.join().unwrap();
+
+ assert_eq!(plugins.len(), 2);
+ assert!(!plugins.is_empty());
+ }
+}
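For context, a minimal usage sketch of the `Plugins` registry added in this patch (a sketch, not part of the commit; the `QueryOptions` struct below is a simplified stand-in defined here for illustration, and the import assumes the `pub use plugins::Plugins` re-export shown in the lib.rs hunk):

use common_base::Plugins;

#[derive(Clone)]
struct QueryOptions {
    disallow_cross_schema_query: bool,
}

fn main() {
    let plugins = Plugins::new();
    // `insert` briefly takes the write lock; the value must be `Send + Sync + 'static`.
    plugins.insert(QueryOptions {
        disallow_cross_schema_query: true,
    });
    // `get` takes the read lock and clones the stored value out.
    let opts = plugins.get::<QueryOptions>().unwrap();
    assert!(opts.disallow_cross_schema_query);
    // `map` borrows the value under the read lock without cloning the whole struct.
    let flag = plugins.map::<QueryOptions, _, _>(|o| o.disallow_cross_schema_query);
    assert_eq!(flag, Some(true));
}

Because the map is behind an `RwLock` rather than a `Mutex`, concurrent readers such as the two threads in the test above no longer serialize on reads.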
|
refactor
|
move `Plugins` to `plugins.rs` and use rwlock (#3862)
|
b0925d94edccf5bd4c1561f58311f1bccc412d0b
|
2023-02-03 16:12:03
|
Yingwen
|
feat: Implement lock component for ProcedureManager (#937)
| false
|
diff --git a/src/common/procedure/src/lib.rs b/src/common/procedure/src/lib.rs
index a8d1628bb072..8108025cc90a 100644
--- a/src/common/procedure/src/lib.rs
+++ b/src/common/procedure/src/lib.rs
@@ -15,6 +15,8 @@
//! Common traits and structures for the procedure framework.
pub mod error;
+#[allow(dead_code)]
+mod local;
mod procedure;
// TODO(yingwen): Remove this attribute once ProcedureManager is implemented.
#[allow(dead_code)]
diff --git a/src/common/procedure/src/local.rs b/src/common/procedure/src/local.rs
new file mode 100644
index 000000000000..507dc5e3e93c
--- /dev/null
+++ b/src/common/procedure/src/local.rs
@@ -0,0 +1,114 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod lock;
+
+use std::sync::{Arc, Mutex};
+
+use tokio::sync::Notify;
+
+use crate::{LockKey, ProcedureId, ProcedureState};
+
+/// Mutable metadata of a procedure during execution.
+#[derive(Debug)]
+struct ExecMeta {
+ /// Current procedure state.
+ state: ProcedureState,
+}
+
+/// Shared metadata of a procedure.
+///
+/// # Note
+/// [Notify] is not a condition variable, we can't guarantee the waiters are notified
+/// if they didn't call `notified()` before we signal the notify. So we
+/// 1. use dedicated notify for each condition, such as waiting for a lock, waiting
+/// for children;
+/// 2. always use `notify_one` and ensure there is only one waiter.
+#[derive(Debug)]
+struct ProcedureMeta {
+ /// Id of this procedure.
+ id: ProcedureId,
+ /// Notify to wait for a lock.
+ lock_notify: Notify,
+ /// Parent procedure id.
+ parent_id: Option<ProcedureId>,
+ /// Notify to wait for subprocedures.
+ child_notify: Notify,
+ /// Locks inherited from the parent procedure.
+ parent_locks: Vec<LockKey>,
+ /// Lock not in `parent_locks` but required by this procedure.
+ ///
+ /// If the parent procedure already owns the lock that this procedure
+ /// needs, we set this field to `None`.
+ lock_key: Option<LockKey>,
+ /// Mutable status during execution.
+ exec_meta: Mutex<ExecMeta>,
+}
+
+impl ProcedureMeta {
+ /// Return all locks the procedure needs.
+ fn locks_needed(&self) -> Vec<LockKey> {
+ let num_locks = self.parent_locks.len() + if self.lock_key.is_some() { 1 } else { 0 };
+ let mut locks = Vec::with_capacity(num_locks);
+ locks.extend_from_slice(&self.parent_locks);
+ if let Some(key) = &self.lock_key {
+ locks.push(key.clone());
+ }
+
+ locks
+ }
+}
+
+/// Reference counted pointer to [ProcedureMeta].
+type ProcedureMetaRef = Arc<ProcedureMeta>;
+
+/// Creates a new [ProcedureMeta] for test purposes.
+#[cfg(test)]
+fn procedure_meta_for_test() -> ProcedureMeta {
+ ProcedureMeta {
+ id: ProcedureId::random(),
+ lock_notify: Notify::new(),
+ parent_id: None,
+ child_notify: Notify::new(),
+ parent_locks: Vec::new(),
+ lock_key: None,
+ exec_meta: Mutex::new(ExecMeta {
+ state: ProcedureState::Running,
+ }),
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_locks_needed() {
+ let mut meta = procedure_meta_for_test();
+ let locks = meta.locks_needed();
+ assert!(locks.is_empty());
+
+ let parent_locks = vec![LockKey::new("a"), LockKey::new("b")];
+ meta.parent_locks = parent_locks.clone();
+ let locks = meta.locks_needed();
+ assert_eq!(parent_locks, locks);
+
+ meta.lock_key = Some(LockKey::new("c"));
+ let locks = meta.locks_needed();
+ assert_eq!(
+ vec![LockKey::new("a"), LockKey::new("b"), LockKey::new("c")],
+ locks
+ );
+ }
+}
diff --git a/src/common/procedure/src/local/lock.rs b/src/common/procedure/src/local/lock.rs
new file mode 100644
index 000000000000..bd337c948da2
--- /dev/null
+++ b/src/common/procedure/src/local/lock.rs
@@ -0,0 +1,214 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::{HashMap, VecDeque};
+use std::sync::RwLock;
+
+use crate::local::ProcedureMetaRef;
+use crate::ProcedureId;
+
+/// A lock entry.
+#[derive(Debug)]
+struct Lock {
+ /// Current lock owner.
+ owner: ProcedureMetaRef,
+ /// Waiter procedures.
+ waiters: VecDeque<ProcedureMetaRef>,
+}
+
+impl Lock {
+ /// Returns a [Lock] with specific `owner` procedure.
+ fn from_owner(owner: ProcedureMetaRef) -> Lock {
+ Lock {
+ owner,
+ waiters: VecDeque::new(),
+ }
+ }
+
+ /// Try to pop a waiter from the waiter list, set it as owner
+ /// and wake up the new owner.
+ ///
+ /// Returns false if there is no waiter in the waiter list.
+ fn switch_owner(&mut self) -> bool {
+ if let Some(waiter) = self.waiters.pop_front() {
+ // Update owner.
+ self.owner = waiter.clone();
+ // We need to use notify_one() since the waiter may not have called `notified()` yet.
+ waiter.lock_notify.notify_one();
+ true
+ } else {
+ false
+ }
+ }
+}
+
+/// Manages lock entries for procedures.
+struct LockMap {
+ locks: RwLock<HashMap<String, Lock>>,
+}
+
+impl LockMap {
+ /// Returns a new [LockMap].
+ fn new() -> LockMap {
+ LockMap {
+ locks: RwLock::new(HashMap::new()),
+ }
+ }
+
+ /// Acquire lock by `key` for procedure with specific `meta`.
+ ///
+ /// Though `meta` is cloneable, callers must ensure that only one `meta`
+ /// is acquiring and holding the lock at the same time.
+ ///
+ /// # Panics
+ /// Panics if the procedure acquires the lock recursively.
+ async fn acquire_lock(&self, key: &str, meta: ProcedureMetaRef) {
+ assert!(!self.hold_lock(key, meta.id));
+
+ {
+ let mut locks = self.locks.write().unwrap();
+ if let Some(lock) = locks.get_mut(key) {
+ // Lock already exists, but we don't expect that a procedure acquires
+ // the same lock again.
+ assert_ne!(lock.owner.id, meta.id);
+
+ // Add this procedure to the waiter list. Here we don't check
+ // whether the procedure is already in the waiter list as we
+ // expect that a procedure should not wait for two locks simultaneously.
+ lock.waiters.push_back(meta.clone());
+ } else {
+ locks.insert(key.to_string(), Lock::from_owner(meta));
+
+ return;
+ }
+ }
+
+ // Wait for notify.
+ meta.lock_notify.notified().await;
+
+ assert!(self.hold_lock(key, meta.id));
+ }
+
+ /// Release lock by `key`.
+ fn release_lock(&self, key: &str, procedure_id: ProcedureId) {
+ let mut locks = self.locks.write().unwrap();
+ if let Some(lock) = locks.get_mut(key) {
+ if lock.owner.id != procedure_id {
+ // This is not the lock owner.
+ return;
+ }
+
+ if !lock.switch_owner() {
+ // Nobody waits for this lock, so we can remove the lock entry.
+ locks.remove(key);
+ }
+ }
+ }
+
+ /// Returns true if the procedure with specific `procedure_id` holds the
+ /// lock of `key`.
+ fn hold_lock(&self, key: &str, procedure_id: ProcedureId) -> bool {
+ let locks = self.locks.read().unwrap();
+ locks
+ .get(key)
+ .map(|lock| lock.owner.id == procedure_id)
+ .unwrap_or(false)
+ }
+
+ /// Returns true if the procedure is waiting for the lock `key`.
+ #[cfg(test)]
+ fn waiting_lock(&self, key: &str, procedure_id: ProcedureId) -> bool {
+ let locks = self.locks.read().unwrap();
+ locks
+ .get(key)
+ .map(|lock| lock.waiters.iter().any(|meta| meta.id == procedure_id))
+ .unwrap_or(false)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use super::*;
+ use crate::local;
+
+ #[test]
+ fn test_lock_no_waiter() {
+ let meta = Arc::new(local::procedure_meta_for_test());
+ let mut lock = Lock::from_owner(meta);
+
+ assert!(!lock.switch_owner());
+ }
+
+ #[tokio::test]
+ async fn test_lock_with_waiter() {
+ let owner = Arc::new(local::procedure_meta_for_test());
+ let mut lock = Lock::from_owner(owner);
+
+ let waiter = Arc::new(local::procedure_meta_for_test());
+ lock.waiters.push_back(waiter.clone());
+
+ assert!(lock.switch_owner());
+ assert!(lock.waiters.is_empty());
+
+ waiter.lock_notify.notified().await;
+ assert_eq!(lock.owner.id, waiter.id);
+ }
+
+ #[tokio::test]
+ async fn test_lock_map() {
+ let key = "hello";
+
+ let owner = Arc::new(local::procedure_meta_for_test());
+ let lock_map = Arc::new(LockMap::new());
+ lock_map.acquire_lock(key, owner.clone()).await;
+
+ let waiter = Arc::new(local::procedure_meta_for_test());
+ let waiter_id = waiter.id;
+
+ // The waiter releases the lock; this should not take effect.
+ lock_map.release_lock(key, waiter_id);
+
+ let lock_map2 = lock_map.clone();
+ let owner_id = owner.id;
+ let handle = tokio::spawn(async move {
+ assert!(lock_map2.hold_lock(key, owner_id));
+ assert!(!lock_map2.hold_lock(key, waiter_id));
+
+ // Waiter wait for lock.
+ lock_map2.acquire_lock(key, waiter.clone()).await;
+
+ assert!(lock_map2.hold_lock(key, waiter_id));
+ });
+
+ // Owner still holds the lock.
+ assert!(lock_map.hold_lock(key, owner_id));
+
+ // Wait until the waiter is waiting for the lock
+ while !lock_map.waiting_lock(key, waiter_id) {
+ tokio::time::sleep(std::time::Duration::from_millis(5)).await;
+ }
+ // Release lock
+ lock_map.release_lock(key, owner_id);
+ assert!(!lock_map.hold_lock(key, owner_id));
+
+ // Wait for task.
+ handle.await.unwrap();
+ // The waiter should hold the lock now.
+ assert!(lock_map.hold_lock(key, waiter_id));
+
+ lock_map.release_lock(key, waiter_id);
+ }
+}
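The note on `ProcedureMeta` and the lock hand-off in `switch_owner` lean on a tokio detail worth spelling out: `Notify::notify_one` stores a permit when no task is currently waiting, so a waiter that only reaches `notified()` after the signal is still woken. A minimal, self-contained sketch of that behaviour (not part of the patch; assumes tokio with the `rt`, `macros`, and `sync` features):

use std::sync::Arc;
use tokio::sync::Notify;

#[tokio::main]
async fn main() {
    let notify = Arc::new(Notify::new());

    // Signal first, as `Lock::switch_owner` does when it promotes a waiter to owner.
    notify.notify_one();

    // The permit was stored, so this returns immediately instead of blocking forever.
    // With `notify_waiters` (or a bare condition variable) this wake-up would be lost.
    notify.notified().await;

    println!("waiter woken even though it registered after the signal");
}

This is why the lock implementation uses a dedicated `Notify` per procedure and `notify_one` with a single waiter, rather than broadcasting.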
|
feat
|
Implement lock component for ProcedureManager (#937)
|
0bf26642a460880cba74c093a549790fa0527347
|
2023-09-26 14:07:04
|
Zhenchi
|
feat: re-support query engine execute dml (#2484)
| false
|
diff --git a/src/cmd/src/cli/repl.rs b/src/cmd/src/cli/repl.rs
index c1b619cf85b9..c5f256a3c5b5 100644
--- a/src/cmd/src/cli/repl.rs
+++ b/src/cmd/src/cli/repl.rs
@@ -261,6 +261,7 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
let state = Arc::new(QueryEngineState::new(
catalog_list,
None,
+ None,
false,
plugins.clone(),
));
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 3a526235ca50..885ed8bbddb5 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -338,6 +338,7 @@ impl DatanodeBuilder {
// query engine in datanode only executes plan with resolved table source.
MemoryCatalogManager::with_default_setup(),
None,
+ None,
false,
plugins,
);
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index bffefabd47e7..a7cef4ba6786 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -53,7 +53,7 @@ use meta_client::client::{MetaClient, MetaClientBuilder};
use operator::delete::{Deleter, DeleterRef};
use operator::insert::{Inserter, InserterRef};
use operator::statement::StatementExecutor;
-use operator::table::table_idents_to_full_name;
+use operator::table::{table_idents_to_full_name, TableMutationOperator};
use partition::manager::PartitionRuleManager;
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
use query::plan::LogicalPlan;
@@ -163,14 +163,6 @@ impl Instance {
catalog_manager.datanode_manager().clone(),
);
- let query_engine = QueryEngineFactory::new_with_plugins(
- catalog_manager.clone(),
- Some(region_query_handler.clone()),
- true,
- plugins.clone(),
- )
- .query_engine();
-
let inserter = Arc::new(Inserter::new(
catalog_manager.clone(),
partition_manager.clone(),
@@ -182,6 +174,20 @@ impl Instance {
datanode_clients,
));
+ let table_mutation_handler = Arc::new(TableMutationOperator::new(
+ inserter.clone(),
+ deleter.clone(),
+ ));
+
+ let query_engine = QueryEngineFactory::new_with_plugins(
+ catalog_manager.clone(),
+ Some(region_query_handler.clone()),
+ Some(table_mutation_handler),
+ true,
+ plugins.clone(),
+ )
+ .query_engine();
+
let statement_executor = Arc::new(StatementExecutor::new(
catalog_manager.clone(),
query_engine.clone(),
@@ -189,7 +195,6 @@ impl Instance {
meta_backend.clone(),
catalog_manager.clone(),
inserter.clone(),
- deleter.clone(),
));
plugins.insert::<StatementExecutorRef>(statement_executor.clone());
@@ -301,9 +306,25 @@ impl Instance {
let region_query_handler =
FrontendRegionQueryHandler::arc(partition_manager.clone(), datanode_manager.clone());
+ let inserter = Arc::new(Inserter::new(
+ catalog_manager.clone(),
+ partition_manager.clone(),
+ datanode_manager.clone(),
+ ));
+ let deleter = Arc::new(Deleter::new(
+ catalog_manager.clone(),
+ partition_manager,
+ datanode_manager.clone(),
+ ));
+ let table_mutation_handler = Arc::new(TableMutationOperator::new(
+ inserter.clone(),
+ deleter.clone(),
+ ));
+
let query_engine = QueryEngineFactory::new_with_plugins(
catalog_manager.clone(),
Some(region_query_handler),
+ Some(table_mutation_handler),
true,
plugins.clone(),
)
@@ -317,25 +338,12 @@ impl Instance {
let cache_invalidator = Arc::new(DummyCacheInvalidator);
let ddl_executor = Arc::new(DdlManager::new(
procedure_manager,
- datanode_manager.clone(),
+ datanode_manager,
cache_invalidator.clone(),
table_metadata_manager.clone(),
Arc::new(StandaloneTableMetadataCreator::new(kv_backend.clone())),
));
- let partition_manager = Arc::new(PartitionRuleManager::new(kv_backend.clone()));
-
- let inserter = Arc::new(Inserter::new(
- catalog_manager.clone(),
- partition_manager.clone(),
- datanode_manager.clone(),
- ));
- let deleter = Arc::new(Deleter::new(
- catalog_manager.clone(),
- partition_manager,
- datanode_manager,
- ));
-
let statement_executor = Arc::new(StatementExecutor::new(
catalog_manager.clone(),
query_engine.clone(),
@@ -343,7 +351,6 @@ impl Instance {
kv_backend.clone(),
cache_invalidator,
inserter.clone(),
- deleter.clone(),
));
Ok(Instance {
diff --git a/src/operator/src/delete.rs b/src/operator/src/delete.rs
index 546d45d5f8f9..50111dd39113 100644
--- a/src/operator/src/delete.rs
+++ b/src/operator/src/delete.rs
@@ -98,7 +98,7 @@ impl Deleter {
&self,
request: TableDeleteRequest,
ctx: QueryContextRef,
- ) -> Result<AffectedRows> {
+ ) -> Result<usize> {
let catalog = request.catalog_name.as_str();
let schema = request.schema_name.as_str();
let table = request.table_name.as_str();
@@ -108,7 +108,9 @@ impl Deleter {
let deletes = TableToRegion::new(&table_info, &self.partition_manager)
.convert(request)
.await?;
- self.do_request(deletes, ctx.trace_id(), 0).await
+
+ let affected_rows = self.do_request(deletes, ctx.trace_id(), 0).await?;
+ Ok(affected_rows as _)
}
}
diff --git a/src/operator/src/req_convert/insert/table_to_region.rs b/src/operator/src/req_convert/insert/table_to_region.rs
index fa7181eebbfd..5ece06b79f96 100644
--- a/src/operator/src/req_convert/insert/table_to_region.rs
+++ b/src/operator/src/req_convert/insert/table_to_region.rs
@@ -145,7 +145,6 @@ mod tests {
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: "table_1".to_string(),
columns_values: HashMap::from([("a".to_string(), vector)]),
- region_number: 0,
}
}
diff --git a/src/operator/src/statement.rs b/src/operator/src/statement.rs
index e6b46d26a601..f2d371b6d23f 100644
--- a/src/operator/src/statement.rs
+++ b/src/operator/src/statement.rs
@@ -48,7 +48,6 @@ use table::engine::TableReference;
use table::requests::{CopyDatabaseRequest, CopyDirection, CopyTableRequest};
use table::TableRef;
-use crate::delete::DeleterRef;
use crate::error::{
self, CatalogSnafu, ExecLogicalPlanSnafu, ExternalSnafu, InvalidSqlSnafu, PlanStatementSnafu,
Result, TableNotFoundSnafu,
@@ -66,7 +65,6 @@ pub struct StatementExecutor {
partition_manager: PartitionRuleManagerRef,
cache_invalidator: CacheInvalidatorRef,
inserter: InserterRef,
- deleter: DeleterRef,
}
impl StatementExecutor {
@@ -77,7 +75,6 @@ impl StatementExecutor {
kv_backend: KvBackendRef,
cache_invalidator: CacheInvalidatorRef,
inserter: InserterRef,
- deleter: DeleterRef,
) -> Self {
Self {
catalog_manager,
@@ -87,7 +84,6 @@ impl StatementExecutor {
partition_manager: Arc::new(PartitionRuleManager::new(kv_backend)),
cache_invalidator,
inserter,
- deleter,
}
}
@@ -104,14 +100,12 @@ impl StatementExecutor {
pub async fn execute_sql(&self, stmt: Statement, query_ctx: QueryContextRef) -> Result<Output> {
match stmt {
- Statement::Query(_) | Statement::Explain(_) => {
+ Statement::Query(_) | Statement::Explain(_) | Statement::Delete(_) => {
self.plan_exec(QueryStatement::Sql(stmt), query_ctx).await
}
Statement::Insert(insert) => self.insert(insert, query_ctx).await,
- Statement::Delete(delete) => self.delete(delete, query_ctx).await,
-
Statement::Tql(tql) => self.execute_tql(tql, query_ctx).await,
Statement::DescribeTable(stmt) => self.describe_table(stmt, query_ctx).await,
diff --git a/src/operator/src/statement/copy_table_from.rs b/src/operator/src/statement/copy_table_from.rs
index b335e5356a5e..9f34627ee4eb 100644
--- a/src/operator/src/statement/copy_table_from.rs
+++ b/src/operator/src/statement/copy_table_from.rs
@@ -330,7 +330,6 @@ impl StatementExecutor {
schema_name: req.schema_name.to_string(),
table_name: req.table_name.to_string(),
columns_values,
- region_number: 0,
},
query_ctx.clone(),
));
diff --git a/src/operator/src/statement/dml.rs b/src/operator/src/statement/dml.rs
index f127dda4d48b..197ac5c03b11 100644
--- a/src/operator/src/statement/dml.rs
+++ b/src/operator/src/statement/dml.rs
@@ -12,30 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::HashMap;
-
use common_query::Output;
-use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
-use datafusion_expr::{DmlStatement, LogicalPlan as DfLogicalPlan, WriteOp};
-use datatypes::schema::SchemaRef;
-use futures_util::StreamExt;
use query::parser::QueryStatement;
-use query::plan::LogicalPlan;
use session::context::QueryContextRef;
-use snafu::{ensure, OptionExt, ResultExt};
-use sql::statements::delete::Delete;
use sql::statements::insert::Insert;
use sql::statements::statement::Statement;
-use table::engine::TableReference;
-use table::metadata::TableInfoRef;
-use table::requests::{DeleteRequest, InsertRequest};
-use table::TableRef;
use super::StatementExecutor;
-use crate::error::{
- BuildColumnVectorsSnafu, ExecLogicalPlanSnafu, MissingTimeIndexColumnSnafu,
- ReadRecordBatchSnafu, Result, UnexpectedSnafu,
-};
+use crate::error::Result;
impl StatementExecutor {
pub async fn insert(&self, insert: Box<Insert>, query_ctx: QueryContextRef) -> Result<Output> {
@@ -45,178 +29,9 @@ impl StatementExecutor {
.handle_statement_insert(insert.as_ref(), &query_ctx)
.await
} else {
- // Slow path: insert with subquery. Execute the subquery first, via query engine. Then
- // insert the results by sending insert requests.
-
- // 1. Plan the whole insert statement into a logical plan, then a wrong insert statement
- // will be caught and a plan error will be returned.
+ // Slow path: insert with subquery. Execute using query engine.
let statement = QueryStatement::Sql(Statement::Insert(insert));
- let logical_plan = self.plan(statement, query_ctx.clone()).await?;
-
- // 2. Execute the subquery, get the results as a record batch stream.
- let dml_statement = extract_dml_statement(logical_plan)?;
- ensure!(
- dml_statement.op == WriteOp::Insert,
- UnexpectedSnafu {
- violated: "expected an INSERT plan"
- }
- );
- let mut stream = self
- .execute_dml_subquery(&dml_statement, query_ctx.clone())
- .await?;
-
- // 3. Send insert requests.
- let mut affected_rows = 0;
- let table = self.get_table_from_dml(dml_statement, &query_ctx).await?;
- let table_info = table.table_info();
- while let Some(batch) = stream.next().await {
- let record_batch = batch.context(ReadRecordBatchSnafu)?;
- let insert_request =
- build_insert_request(record_batch, table.schema(), &table_info)?;
- affected_rows += self
- .inserter
- .handle_table_insert(insert_request, query_ctx.clone())
- .await?;
- }
-
- Ok(Output::AffectedRows(affected_rows))
+ self.plan_exec(statement, query_ctx).await
}
}
-
- pub async fn delete(&self, delete: Box<Delete>, query_ctx: QueryContextRef) -> Result<Output> {
- // 1. Plan the whole delete statement into a logical plan, then a wrong delete statement
- // will be caught and a plan error will be returned.
- let statement = QueryStatement::Sql(Statement::Delete(delete));
- let logical_plan = self.plan(statement, query_ctx.clone()).await?;
-
- // 2. Execute the subquery, get the results as a record batch stream.
- let dml_statement = extract_dml_statement(logical_plan)?;
- ensure!(
- dml_statement.op == WriteOp::Delete,
- UnexpectedSnafu {
- violated: "expected a DELETE plan"
- }
- );
- let mut stream = self
- .execute_dml_subquery(&dml_statement, query_ctx.clone())
- .await?;
-
- // 3. Send delete requests.
- let mut affected_rows = 0;
- let table = self.get_table_from_dml(dml_statement, &query_ctx).await?;
- let table_info = table.table_info();
- while let Some(batch) = stream.next().await {
- let record_batch = batch.context(ReadRecordBatchSnafu)?;
- let request = build_delete_request(record_batch, table.schema(), &table_info)?;
- affected_rows += self
- .deleter
- .handle_table_delete(request, query_ctx.clone())
- .await?;
- }
-
- Ok(Output::AffectedRows(affected_rows as _))
- }
-
- async fn execute_dml_subquery(
- &self,
- dml_statement: &DmlStatement,
- query_ctx: QueryContextRef,
- ) -> Result<SendableRecordBatchStream> {
- let subquery_plan = LogicalPlan::from(dml_statement.input.as_ref().clone());
- let output = self
- .query_engine
- .execute(subquery_plan, query_ctx)
- .await
- .context(ExecLogicalPlanSnafu)?;
- match output {
- Output::Stream(stream) => Ok(stream),
- Output::RecordBatches(record_batches) => Ok(record_batches.as_stream()),
- _ => UnexpectedSnafu {
- violated: "expected a stream",
- }
- .fail(),
- }
- }
-
- async fn get_table_from_dml(
- &self,
- dml_statement: DmlStatement,
- query_ctx: &QueryContextRef,
- ) -> Result<TableRef> {
- let default_catalog = query_ctx.current_catalog().to_owned();
- let default_schema = query_ctx.current_schema().to_owned();
- let resolved_table_ref = dml_statement
- .table_name
- .resolve(&default_catalog, &default_schema);
- let table_ref = TableReference::full(
- &resolved_table_ref.catalog,
- &resolved_table_ref.schema,
- &resolved_table_ref.table,
- );
- self.get_table(&table_ref).await
- }
-}
-
-fn extract_dml_statement(logical_plan: LogicalPlan) -> Result<DmlStatement> {
- let LogicalPlan::DfPlan(df_plan) = logical_plan;
- match df_plan {
- DfLogicalPlan::Dml(dml) => Ok(dml),
- _ => UnexpectedSnafu {
- violated: "expected a DML plan",
- }
- .fail(),
- }
-}
-
-fn build_insert_request(
- record_batch: RecordBatch,
- table_schema: SchemaRef,
- table_info: &TableInfoRef,
-) -> Result<InsertRequest> {
- let columns_values = record_batch
- .column_vectors(&table_info.name, table_schema)
- .context(BuildColumnVectorsSnafu)?;
-
- Ok(InsertRequest {
- catalog_name: table_info.catalog_name.clone(),
- schema_name: table_info.schema_name.clone(),
- table_name: table_info.name.clone(),
- columns_values,
- region_number: 0,
- })
-}
-
-fn build_delete_request(
- record_batch: RecordBatch,
- table_schema: SchemaRef,
- table_info: &TableInfoRef,
-) -> Result<DeleteRequest> {
- let ts_column = table_schema
- .timestamp_column()
- .map(|x| x.name.clone())
- .with_context(|| table::error::MissingTimeIndexColumnSnafu {
- table_name: table_info.name.clone(),
- })
- .context(MissingTimeIndexColumnSnafu)?;
-
- let column_vectors = record_batch
- .column_vectors(&table_info.name, table_schema)
- .context(BuildColumnVectorsSnafu)?;
-
- let rowkey_columns = table_info
- .meta
- .row_key_column_names()
- .collect::<Vec<&String>>();
-
- let key_column_values = column_vectors
- .into_iter()
- .filter(|x| x.0 == ts_column || rowkey_columns.contains(&&x.0))
- .collect::<HashMap<_, _>>();
-
- Ok(DeleteRequest {
- catalog_name: table_info.catalog_name.clone(),
- schema_name: table_info.schema_name.clone(),
- table_name: table_info.name.clone(),
- key_column_values,
- })
}
diff --git a/src/operator/src/table.rs b/src/operator/src/table.rs
index 240dd0103247..abfc27732bf5 100644
--- a/src/operator/src/table.rs
+++ b/src/operator/src/table.rs
@@ -12,10 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use async_trait::async_trait;
+use common_error::ext::BoxedError;
+use query::error as query_error;
+use query::error::Result as QueryResult;
+use query::table_mutation::{AffectedRows, TableMutationHandler};
use session::context::QueryContextRef;
+use snafu::ResultExt;
use sqlparser::ast::ObjectName;
+use table::requests::{DeleteRequest as TableDeleteRequest, InsertRequest as TableInsertRequest};
+use crate::delete::DeleterRef;
use crate::error::{InvalidSqlSnafu, Result};
+use crate::insert::InserterRef;
// TODO(LFC): Refactor consideration: move this function to some helper mod,
// could be done together or after `TableReference`'s refactoring, when issue #559 is resolved.
@@ -47,3 +56,41 @@ pub fn table_idents_to_full_name(
}.fail(),
}
}
+
+pub struct TableMutationOperator {
+ inserter: InserterRef,
+ deleter: DeleterRef,
+}
+
+impl TableMutationOperator {
+ pub fn new(inserter: InserterRef, deleter: DeleterRef) -> Self {
+ Self { inserter, deleter }
+ }
+}
+
+#[async_trait]
+impl TableMutationHandler for TableMutationOperator {
+ async fn insert(
+ &self,
+ request: TableInsertRequest,
+ ctx: QueryContextRef,
+ ) -> QueryResult<AffectedRows> {
+ self.inserter
+ .handle_table_insert(request, ctx)
+ .await
+ .map_err(BoxedError::new)
+ .context(query_error::TableMutationSnafu)
+ }
+
+ async fn delete(
+ &self,
+ request: TableDeleteRequest,
+ ctx: QueryContextRef,
+ ) -> QueryResult<AffectedRows> {
+ self.deleter
+ .handle_table_delete(request, ctx)
+ .await
+ .map_err(BoxedError::new)
+ .context(query_error::TableMutationSnafu)
+ }
+}
diff --git a/src/operator/src/tests/partition_manager.rs b/src/operator/src/tests/partition_manager.rs
index 415ec0f5d610..58421634ae2a 100644
--- a/src/operator/src/tests/partition_manager.rs
+++ b/src/operator/src/tests/partition_manager.rs
@@ -435,7 +435,6 @@ fn test_meter_insert_request() {
schema_name: "public".to_string(),
table_name: "numbers".to_string(),
columns_values: Default::default(),
- region_number: 0,
};
meter_insert_request!(req);
diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs
index 1357897b589c..3928a630c92b 100644
--- a/src/query/src/datafusion.rs
+++ b/src/query/src/datafusion.rs
@@ -54,8 +54,8 @@ use crate::dataframe::DataFrame;
pub use crate::datafusion::planner::DfContextProviderAdapter;
use crate::error::{
CatalogSnafu, CreateRecordBatchSnafu, CreateSchemaSnafu, DataFusionSnafu,
- MissingTimestampColumnSnafu, QueryExecutionSnafu, Result, TableNotFoundSnafu,
- UnimplementedSnafu, UnsupportedExprSnafu,
+ MissingTableMutationHandlerSnafu, MissingTimestampColumnSnafu, QueryExecutionSnafu, Result,
+ TableNotFoundSnafu, UnimplementedSnafu, UnsupportedExprSnafu,
};
use crate::executor::QueryExecutor;
use crate::logical_optimizer::LogicalOptimizer;
@@ -115,7 +115,7 @@ impl DatafusionQueryEngine {
let table = self.find_table(&table_name).await?;
let output = self
- .exec_query_plan(LogicalPlan::DfPlan((*dml.input).clone()), query_ctx)
+ .exec_query_plan(LogicalPlan::DfPlan((*dml.input).clone()), query_ctx.clone())
.await?;
let mut stream = match output {
Output::RecordBatches(batches) => batches.as_stream(),
@@ -132,8 +132,14 @@ impl DatafusionQueryEngine {
.context(QueryExecutionSnafu)?;
let rows = match dml.op {
- WriteOp::Insert => Self::insert(&table_name, &table, column_vectors).await?,
- WriteOp::Delete => Self::delete(&table_name, &table, column_vectors).await?,
+ WriteOp::Insert => {
+ self.insert(&table_name, column_vectors, query_ctx.clone())
+ .await?
+ }
+ WriteOp::Delete => {
+ self.delete(&table_name, &table, column_vectors, query_ctx.clone())
+ .await?
+ }
_ => unreachable!("guarded by the 'ensure!' at the beginning"),
};
affected_rows += rows;
@@ -142,9 +148,11 @@ impl DatafusionQueryEngine {
}
async fn delete<'a>(
+ &self,
table_name: &ResolvedTableReference<'a>,
table: &TableRef,
column_vectors: HashMap<String, VectorRef>,
+ query_ctx: QueryContextRef,
) -> Result<usize> {
let catalog_name = table_name.catalog.to_string();
let schema_name = table_name.schema.to_string();
@@ -174,31 +182,31 @@ impl DatafusionQueryEngine {
key_column_values: column_vectors,
};
- table
- .delete(request)
+ self.state
+ .table_mutation_handler()
+ .context(MissingTableMutationHandlerSnafu)?
+ .delete(request, query_ctx)
.await
- .map_err(BoxedError::new)
- .context(QueryExecutionSnafu)
}
async fn insert<'a>(
+ &self,
table_name: &ResolvedTableReference<'a>,
- table: &TableRef,
column_vectors: HashMap<String, VectorRef>,
+ query_ctx: QueryContextRef,
) -> Result<usize> {
let request = InsertRequest {
catalog_name: table_name.catalog.to_string(),
schema_name: table_name.schema.to_string(),
table_name: table_name.table.to_string(),
columns_values: column_vectors,
- region_number: 0,
};
- table
- .insert(request)
+ self.state
+ .table_mutation_handler()
+ .context(MissingTableMutationHandlerSnafu)?
+ .insert(request, query_ctx)
.await
- .map_err(BoxedError::new)
- .context(QueryExecutionSnafu)
}
async fn find_table(&self, table_name: &ResolvedTableReference<'_>) -> Result<TableRef> {
@@ -517,7 +525,7 @@ mod tests {
};
catalog_manager.register_table_sync(req).unwrap();
- QueryEngineFactory::new(catalog_manager, None, false).query_engine()
+ QueryEngineFactory::new(catalog_manager, None, None, false).query_engine()
}
#[tokio::test]
diff --git a/src/query/src/error.rs b/src/query/src/error.rs
index e535a4b94b5c..5bbdab347261 100644
--- a/src/query/src/error.rs
+++ b/src/query/src/error.rs
@@ -254,11 +254,20 @@ pub enum Error {
#[snafu(display("Column schema has no default value, column: {}", column))]
ColumnSchemaNoDefault { column: String, location: Location },
- #[snafu(display("Region query error, source: {}", source))]
+ #[snafu(display("Region query error"))]
RegionQuery {
source: BoxedError,
location: Location,
},
+
+ #[snafu(display("Table mutation error"))]
+ TableMutation {
+ source: BoxedError,
+ location: Location,
+ },
+
+ #[snafu(display("Missing table mutation handler"))]
+ MissingTableMutationHandler { location: Location },
}
impl ErrorExt for Error {
@@ -305,7 +314,10 @@ impl ErrorExt for Error {
RemoteRequest { source, .. } => source.status_code(),
UnexpectedOutputKind { .. } => StatusCode::Unexpected,
CreateSchema { source, .. } => source.status_code(),
+
RegionQuery { source, .. } => source.status_code(),
+ TableMutation { source, .. } => source.status_code(),
+ MissingTableMutationHandler { .. } => StatusCode::Unexpected,
}
}
diff --git a/src/query/src/lib.rs b/src/query/src/lib.rs
index dfdbe9a208ce..54506f6c93bd 100644
--- a/src/query/src/lib.rs
+++ b/src/query/src/lib.rs
@@ -32,6 +32,7 @@ pub mod query_engine;
mod range_select;
pub mod region_query;
pub mod sql;
+pub mod table_mutation;
pub use crate::datafusion::DfContextProviderAdapter;
pub use crate::query_engine::{
diff --git a/src/query/src/query_engine.rs b/src/query/src/query_engine.rs
index b18834d2c8ec..d88f399da316 100644
--- a/src/query/src/query_engine.rs
+++ b/src/query/src/query_engine.rs
@@ -38,6 +38,7 @@ use crate::planner::LogicalPlanner;
pub use crate::query_engine::context::QueryEngineContext;
pub use crate::query_engine::state::QueryEngineState;
use crate::region_query::RegionQueryHandlerRef;
+use crate::table_mutation::TableMutationHandlerRef;
/// Describe statement result
#[derive(Debug)]
@@ -80,11 +81,13 @@ impl QueryEngineFactory {
pub fn new(
catalog_manager: CatalogManagerRef,
region_query_handler: Option<RegionQueryHandlerRef>,
+ table_mutation_handler: Option<TableMutationHandlerRef>,
with_dist_planner: bool,
) -> Self {
Self::new_with_plugins(
catalog_manager,
region_query_handler,
+ table_mutation_handler,
with_dist_planner,
Default::default(),
)
@@ -93,12 +96,14 @@ impl QueryEngineFactory {
pub fn new_with_plugins(
catalog_manager: CatalogManagerRef,
region_query_handler: Option<RegionQueryHandlerRef>,
+ table_mutation_handler: Option<TableMutationHandlerRef>,
with_dist_planner: bool,
plugins: Arc<Plugins>,
) -> Self {
let state = Arc::new(QueryEngineState::new(
catalog_manager,
region_query_handler,
+ table_mutation_handler,
with_dist_planner,
plugins.clone(),
));
@@ -131,7 +136,7 @@ mod tests {
#[test]
fn test_query_engine_factory() {
let catalog_list = catalog::memory::new_memory_catalog_manager().unwrap();
- let factory = QueryEngineFactory::new(catalog_list, None, false);
+ let factory = QueryEngineFactory::new(catalog_list, None, None, false);
let engine = factory.query_engine();
diff --git a/src/query/src/query_engine/state.rs b/src/query/src/query_engine/state.rs
index 93916db86101..e882a6811e84 100644
--- a/src/query/src/query_engine/state.rs
+++ b/src/query/src/query_engine/state.rs
@@ -48,6 +48,7 @@ use crate::optimizer::type_conversion::TypeConversionRule;
use crate::query_engine::options::QueryOptions;
use crate::range_select::planner::RangeSelectPlanner;
use crate::region_query::RegionQueryHandlerRef;
+use crate::table_mutation::TableMutationHandlerRef;
/// Query engine global state
// TODO(yingwen): This QueryEngineState still relies on datafusion, maybe we can define a trait for it,
@@ -57,6 +58,7 @@ use crate::region_query::RegionQueryHandlerRef;
pub struct QueryEngineState {
df_context: SessionContext,
catalog_manager: CatalogManagerRef,
+ table_mutation_handler: Option<TableMutationHandlerRef>,
aggregate_functions: Arc<RwLock<HashMap<String, AggregateFunctionMetaRef>>>,
plugins: Arc<Plugins>,
}
@@ -73,6 +75,7 @@ impl QueryEngineState {
pub fn new(
catalog_list: CatalogManagerRef,
region_query_handler: Option<RegionQueryHandlerRef>,
+ table_mutation_handler: Option<TableMutationHandlerRef>,
with_dist_planner: bool,
plugins: Arc<Plugins>,
) -> Self {
@@ -123,6 +126,7 @@ impl QueryEngineState {
Self {
df_context,
catalog_manager: catalog_list,
+ table_mutation_handler,
aggregate_functions: Arc::new(RwLock::new(HashMap::new())),
plugins,
}
@@ -184,6 +188,11 @@ impl QueryEngineState {
&self.catalog_manager
}
+ #[inline]
+ pub fn table_mutation_handler(&self) -> Option<&TableMutationHandlerRef> {
+ self.table_mutation_handler.as_ref()
+ }
+
pub(crate) fn disallow_cross_schema_query(&self) -> bool {
self.plugins
.map::<QueryOptions, _, _>(|x| x.disallow_cross_schema_query)
diff --git a/src/query/src/range_select/plan_rewrite.rs b/src/query/src/range_select/plan_rewrite.rs
index 8842b6dd367a..e7ae66bac8b8 100644
--- a/src/query/src/range_select/plan_rewrite.rs
+++ b/src/query/src/range_select/plan_rewrite.rs
@@ -388,7 +388,7 @@ mod test {
table,
})
.is_ok());
- QueryEngineFactory::new(catalog_list, None, false).query_engine()
+ QueryEngineFactory::new(catalog_list, None, None, false).query_engine()
}
async fn query_plan_compare(sql: &str, expected: String) {
diff --git a/src/query/src/region_query.rs b/src/query/src/region_query.rs
index ef6e494dfc86..f9861103e62b 100644
--- a/src/query/src/region_query.rs
+++ b/src/query/src/region_query.rs
@@ -22,7 +22,6 @@ use crate::error::Result;
#[async_trait]
pub trait RegionQueryHandler: Send + Sync {
- // TODO(ruihang): add trace id and span id in the request.
async fn do_get(&self, request: QueryRequest) -> Result<SendableRecordBatchStream>;
}
diff --git a/src/query/src/table_mutation.rs b/src/query/src/table_mutation.rs
new file mode 100644
index 000000000000..bff93af93687
--- /dev/null
+++ b/src/query/src/table_mutation.rs
@@ -0,0 +1,35 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use session::context::QueryContextRef;
+use table::requests::{DeleteRequest, InsertRequest};
+
+use crate::error::Result;
+
+pub type AffectedRows = usize;
+
+/// A trait for handling table mutations in `QueryEngine`.
+#[async_trait]
+pub trait TableMutationHandler: Send + Sync {
+ /// Inserts rows into the table.
+ async fn insert(&self, request: InsertRequest, ctx: QueryContextRef) -> Result<AffectedRows>;
+
+ /// Deletes rows from the table.
+ async fn delete(&self, request: DeleteRequest, ctx: QueryContextRef) -> Result<AffectedRows>;
+}
+
+pub type TableMutationHandlerRef = Arc<dyn TableMutationHandler>;
diff --git a/src/query/src/tests.rs b/src/query/src/tests.rs
index 4a60f925ce0b..14e62a2d59c0 100644
--- a/src/query/src/tests.rs
+++ b/src/query/src/tests.rs
@@ -51,5 +51,5 @@ async fn exec_selection(engine: QueryEngineRef, sql: &str) -> Vec<RecordBatch> {
pub fn new_query_engine_with_table(table: TableRef) -> QueryEngineRef {
let catalog_manager = MemoryCatalogManager::new_with_table(table);
- QueryEngineFactory::new(catalog_manager, None, false).query_engine()
+ QueryEngineFactory::new(catalog_manager, None, None, false).query_engine()
}
diff --git a/src/query/src/tests/query_engine_test.rs b/src/query/src/tests/query_engine_test.rs
index 41e95d3de1e5..71894eea47c2 100644
--- a/src/query/src/tests/query_engine_test.rs
+++ b/src/query/src/tests/query_engine_test.rs
@@ -47,7 +47,7 @@ async fn test_datafusion_query_engine() -> Result<()> {
let catalog_list = catalog::memory::new_memory_catalog_manager()
.map_err(BoxedError::new)
.context(QueryExecutionSnafu)?;
- let factory = QueryEngineFactory::new(catalog_list, None, false);
+ let factory = QueryEngineFactory::new(catalog_list, None, None, false);
let engine = factory.query_engine();
let column_schemas = vec![ColumnSchema::new(
@@ -129,7 +129,7 @@ async fn test_query_validate() -> Result<()> {
});
let plugins = Arc::new(plugins);
- let factory = QueryEngineFactory::new_with_plugins(catalog_list, None, false, plugins);
+ let factory = QueryEngineFactory::new_with_plugins(catalog_list, None, None, false, plugins);
let engine = factory.query_engine();
let stmt = QueryLanguageParser::parse_sql("select number from public.numbers").unwrap();
@@ -153,7 +153,7 @@ async fn test_udf() -> Result<()> {
common_telemetry::init_default_ut_logging();
let catalog_list = catalog_manager()?;
- let factory = QueryEngineFactory::new(catalog_list, None, false);
+ let factory = QueryEngineFactory::new(catalog_list, None, None, false);
let engine = factory.query_engine();
let pow = make_scalar_function(pow);
diff --git a/src/query/src/tests/time_range_filter_test.rs b/src/query/src/tests/time_range_filter_test.rs
index 17c894b54595..dfbcc3a13a73 100644
--- a/src/query/src/tests/time_range_filter_test.rs
+++ b/src/query/src/tests/time_range_filter_test.rs
@@ -106,7 +106,7 @@ fn create_test_engine() -> TimeRangeTester {
};
let _ = catalog_manager.register_table_sync(req).unwrap();
- let engine = QueryEngineFactory::new(catalog_manager, None, false).query_engine();
+ let engine = QueryEngineFactory::new(catalog_manager, None, None, false).query_engine();
TimeRangeTester { engine, filter }
}
diff --git a/src/script/benches/py_benchmark.rs b/src/script/benches/py_benchmark.rs
index f4827a18ad73..27d8ef07ce12 100644
--- a/src/script/benches/py_benchmark.rs
+++ b/src/script/benches/py_benchmark.rs
@@ -52,7 +52,7 @@ where
pub(crate) fn sample_script_engine() -> PyEngine {
let catalog_manager =
MemoryCatalogManager::new_with_table(NumbersTable::table(NUMBERS_TABLE_ID));
- let query_engine = QueryEngineFactory::new(catalog_manager, None, false).query_engine();
+ let query_engine = QueryEngineFactory::new(catalog_manager, None, None, false).query_engine();
PyEngine::new(query_engine.clone())
}
diff --git a/src/script/src/python/engine.rs b/src/script/src/python/engine.rs
index 5ab4f06298b5..61f603232a74 100644
--- a/src/script/src/python/engine.rs
+++ b/src/script/src/python/engine.rs
@@ -375,7 +375,8 @@ mod tests {
pub(crate) fn sample_script_engine() -> PyEngine {
let catalog_manager =
MemoryCatalogManager::new_with_table(NumbersTable::table(NUMBERS_TABLE_ID));
- let query_engine = QueryEngineFactory::new(catalog_manager, None, false).query_engine();
+ let query_engine =
+ QueryEngineFactory::new(catalog_manager, None, None, false).query_engine();
PyEngine::new(query_engine.clone())
}
diff --git a/src/script/src/test.rs b/src/script/src/test.rs
index c937c4afd215..55ba73f582a1 100644
--- a/src/script/src/test.rs
+++ b/src/script/src/test.rs
@@ -56,7 +56,7 @@ pub async fn setup_scripts_manager(
let catalog_manager = MemoryCatalogManager::new_with_table(table.clone());
- let factory = QueryEngineFactory::new(catalog_manager.clone(), None, false);
+ let factory = QueryEngineFactory::new(catalog_manager.clone(), None, None, false);
let query_engine = factory.query_engine();
let mgr = ScriptManager::new(Arc::new(MockGrpcQueryHandler {}) as _, query_engine)
.await
diff --git a/src/servers/src/line_writer.rs b/src/servers/src/line_writer.rs
index 7da1de6bb8df..38ebe218c7a9 100644
--- a/src/servers/src/line_writer.rs
+++ b/src/servers/src/line_writer.rs
@@ -141,7 +141,6 @@ impl LineWriter {
schema_name: self.db,
table_name: self.table_name,
columns_values,
- region_number: 0, // TODO(hl): Check if assign 0 region is ok?
}
}
}
diff --git a/src/servers/tests/mod.rs b/src/servers/tests/mod.rs
index 4ef156cb0431..bb2a79818c27 100644
--- a/src/servers/tests/mod.rs
+++ b/src/servers/tests/mod.rs
@@ -214,7 +214,7 @@ impl GrpcQueryHandler for DummyInstance {
fn create_testing_instance(table: TableRef) -> DummyInstance {
let catalog_manager = MemoryCatalogManager::new_with_table(table);
- let query_engine = QueryEngineFactory::new(catalog_manager, None, false).query_engine();
+ let query_engine = QueryEngineFactory::new(catalog_manager, None, None, false).query_engine();
DummyInstance::new(query_engine)
}
diff --git a/src/table/src/requests.rs b/src/table/src/requests.rs
index 9fcb7f8db7e3..9d78f21534fb 100644
--- a/src/table/src/requests.rs
+++ b/src/table/src/requests.rs
@@ -246,7 +246,6 @@ pub struct InsertRequest {
pub schema_name: String,
pub table_name: String,
pub columns_values: HashMap<String, VectorRef>,
- pub region_number: RegionNumber,
}
/// Delete (by primary key) request
@@ -327,7 +326,6 @@ macro_rules! meter_insert_request {
$req.catalog_name.to_string(),
$req.schema_name.to_string(),
$req.table_name.to_string(),
- $req.region_number,
$req
);
};
diff --git a/src/table/src/table.rs b/src/table/src/table.rs
index 8af3cfd1ffec..d2ea65cf840b 100644
--- a/src/table/src/table.rs
+++ b/src/table/src/table.rs
@@ -24,14 +24,10 @@ use async_trait::async_trait;
use common_query::logical_plan::Expr;
use common_recordbatch::SendableRecordBatchStream;
use datatypes::schema::SchemaRef;
-use store_api::storage::{RegionNumber, ScanRequest};
+use store_api::storage::ScanRequest;
-use crate::error::{Result, UnsupportedSnafu};
+use crate::error::Result;
use crate::metadata::{FilterPushDownType, TableId, TableInfoRef, TableType};
-use crate::requests::{AlterTableRequest, DeleteRequest, InsertRequest};
-use crate::stats::TableStatistics;
-
-pub type AlterContext = anymap::Map<dyn Any + Send + Sync>;
/// Table abstraction.
#[async_trait]
@@ -49,16 +45,6 @@ pub trait Table: Send + Sync {
/// Get the type of this table for metadata/catalog purposes.
fn table_type(&self) -> TableType;
- /// Insert values into table.
- ///
- /// Returns number of inserted rows.
- async fn insert(&self, _request: InsertRequest) -> Result<usize> {
- UnsupportedSnafu {
- operation: "INSERT",
- }
- .fail()?
- }
-
async fn scan_to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream>;
/// Tests whether the table provider can make use of any or all filter expressions
@@ -66,67 +52,6 @@ pub trait Table: Send + Sync {
fn supports_filters_pushdown(&self, filters: &[&Expr]) -> Result<Vec<FilterPushDownType>> {
Ok(vec![FilterPushDownType::Unsupported; filters.len()])
}
-
- /// Alter table.
- async fn alter(&self, _context: AlterContext, _request: &AlterTableRequest) -> Result<()> {
- UnsupportedSnafu {
- operation: "ALTER TABLE",
- }
- .fail()?
- }
-
- /// Delete rows in the table.
- ///
- /// Returns number of deleted rows.
- async fn delete(&self, _request: DeleteRequest) -> Result<usize> {
- UnsupportedSnafu {
- operation: "DELETE",
- }
- .fail()?
- }
-
- /// Flush table.
- ///
- /// Options:
- /// - region_number: specify region to flush.
- /// - wait: Whether to wait until flush is done.
- async fn flush(&self, region_number: Option<RegionNumber>, wait: Option<bool>) -> Result<()> {
- let _ = (region_number, wait);
- UnsupportedSnafu { operation: "FLUSH" }.fail()?
- }
-
- /// Close the table.
- async fn close(&self, _regions: &[RegionNumber]) -> Result<()> {
- Ok(())
- }
-
- /// Return true if contains the region
- fn contains_region(&self, _region: RegionNumber) -> Result<bool> {
- UnsupportedSnafu {
- operation: "contain_region",
- }
- .fail()?
- }
-
- /// Get statistics for this table, if available
- fn statistics(&self) -> Option<TableStatistics> {
- None
- }
-
- async fn compact(&self, region_number: Option<RegionNumber>, wait: Option<bool>) -> Result<()> {
- let _ = (region_number, wait);
- UnsupportedSnafu {
- operation: "COMPACTION",
- }
- .fail()?
- }
-
- async fn truncate(&self) -> Result<()> {
- UnsupportedSnafu {
- operation: "TRUNCATE",
- }
- .fail()?
- }
}
pub type TableRef = Arc<dyn Table>;
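The overall shape of this change is that table mutations move off the `Table` trait and into an optional handler held by the query engine state, which fails with a clear error when no handler is wired in (as in the datanode, where only resolved plans are executed). A stripped-down, self-contained sketch of that pattern follows; the types are hypothetical simplifications, not the real GreptimeDB signatures:

use std::sync::Arc;

type AffectedRows = usize;

// Stand-in for `TableMutationHandler`: the engine delegates writes to it.
trait MutationHandler: Send + Sync {
    fn insert(&self, rows: usize) -> AffectedRows;
}

// Stand-in for `QueryEngineState`: the handler is optional, mirroring setups
// where no mutation handler is registered.
struct EngineState {
    mutation_handler: Option<Arc<dyn MutationHandler>>,
}

impl EngineState {
    fn execute_insert(&self, rows: usize) -> Result<AffectedRows, String> {
        self.mutation_handler
            .as_ref()
            .ok_or_else(|| "missing table mutation handler".to_string())
            .map(|handler| handler.insert(rows))
    }
}

struct CountingHandler;
impl MutationHandler for CountingHandler {
    fn insert(&self, rows: usize) -> AffectedRows {
        rows
    }
}

fn main() {
    let with_handler = EngineState {
        mutation_handler: Some(Arc::new(CountingHandler)),
    };
    assert_eq!(with_handler.execute_insert(3), Ok(3));

    let without_handler = EngineState { mutation_handler: None };
    assert!(without_handler.execute_insert(3).is_err());
}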
|
feat
|
re-support query engine execute dml (#2484)
|
cb4cffe636efde1a14f23bc7f30f4c9d820a929f
|
2024-08-04 06:16:04
|
Weny Xu
|
chore: bump opendal version to 0.48 (#4499)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 3afe95509cac..c1c7d1263603 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6967,9 +6967,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
[[package]]
name = "opendal"
-version = "0.47.2"
+version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ff159a2da374ef2d64848a6547943cf1af7d2ceada5ae77be175e1389aa07ae3"
+checksum = "615d41187deea0ea7fab5b48e9afef6ae8fc742fdcfa248846ee3d92ff71e986"
dependencies = [
"anyhow",
"async-trait",
@@ -6986,7 +6986,7 @@ dependencies = [
"md-5",
"once_cell",
"percent-encoding",
- "quick-xml 0.31.0",
+ "quick-xml 0.36.1",
"reqsign",
"reqwest",
"serde",
@@ -8605,9 +8605,19 @@ dependencies = [
[[package]]
name = "quick-xml"
-version = "0.31.0"
+version = "0.35.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "86e446ed58cef1bbfe847bc2fda0e2e4ea9f0e57b90c507d4781292590d72a4e"
+dependencies = [
+ "memchr",
+ "serde",
+]
+
+[[package]]
+name = "quick-xml"
+version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1004a344b30a54e2ee58d66a71b32d2db2feb0a31f9a2d302bf0536f15de2a33"
+checksum = "96a05e2e8efddfa51a84ca47cec303fac86c8541b686d37cac5efc0e094417bc"
dependencies = [
"memchr",
"serde",
@@ -8883,9 +8893,9 @@ dependencies = [
[[package]]
name = "reqsign"
-version = "0.15.2"
+version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "70fe66d4cd0b5ed9b1abbfe639bf6baeaaf509f7da2d51b31111ba945be59286"
+checksum = "03dd4ba7c3901dd43e6b8c7446a760d45bc1ea4301002e1a6fa48f97c3a796fa"
dependencies = [
"anyhow",
"async-trait",
@@ -8901,7 +8911,7 @@ dependencies = [
"log",
"once_cell",
"percent-encoding",
- "quick-xml 0.31.0",
+ "quick-xml 0.35.0",
"rand",
"reqwest",
"rsa 0.9.6",
diff --git a/src/common/datasource/src/object_store/fs.rs b/src/common/datasource/src/object_store/fs.rs
index 16e30b0044c0..f21fe46099d5 100644
--- a/src/common/datasource/src/object_store/fs.rs
+++ b/src/common/datasource/src/object_store/fs.rs
@@ -19,9 +19,8 @@ use snafu::ResultExt;
use crate::error::{BuildBackendSnafu, Result};
pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
- let mut builder = Fs::default();
- let _ = builder.root(root);
- let object_store = ObjectStore::new(builder)
+ let builder = Fs::default();
+ let object_store = ObjectStore::new(builder.root(root))
.context(BuildBackendSnafu)?
.layer(
object_store::layers::LoggingLayer::default()
diff --git a/src/common/datasource/src/object_store/s3.rs b/src/common/datasource/src/object_store/s3.rs
index 6efc6474c45d..cdba93767745 100644
--- a/src/common/datasource/src/object_store/s3.rs
+++ b/src/common/datasource/src/object_store/s3.rs
@@ -44,28 +44,26 @@ pub fn build_s3_backend(
path: &str,
connection: &HashMap<String, String>,
) -> Result<ObjectStore> {
- let mut builder = S3::default();
-
- let _ = builder.root(path).bucket(host);
+ let mut builder = S3::default().root(path).bucket(host);
if let Some(endpoint) = connection.get(ENDPOINT) {
- let _ = builder.endpoint(endpoint);
+ builder = builder.endpoint(endpoint);
}
if let Some(region) = connection.get(REGION) {
- let _ = builder.region(region);
+ builder = builder.region(region);
}
if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
- let _ = builder.access_key_id(key_id);
+ builder = builder.access_key_id(key_id);
}
if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
- let _ = builder.secret_access_key(key);
+ builder = builder.secret_access_key(key);
}
if let Some(session_token) = connection.get(SESSION_TOKEN) {
- let _ = builder.security_token(session_token);
+ builder = builder.session_token(session_token);
}
if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
@@ -79,7 +77,7 @@ pub fn build_s3_backend(
.build()
})?;
if enable {
- let _ = builder.enable_virtual_host_style();
+ builder = builder.enable_virtual_host_style();
}
}
diff --git a/src/common/datasource/src/test_util.rs b/src/common/datasource/src/test_util.rs
index d3a24a23d24a..64fa41ad62ec 100644
--- a/src/common/datasource/src/test_util.rs
+++ b/src/common/datasource/src/test_util.rs
@@ -47,19 +47,15 @@ pub fn format_schema(schema: Schema) -> Vec<String> {
}
pub fn test_store(root: &str) -> ObjectStore {
- let mut builder = Fs::default();
- let _ = builder.root(root);
-
- ObjectStore::new(builder).unwrap().finish()
+ let builder = Fs::default();
+ ObjectStore::new(builder.root(root)).unwrap().finish()
}
pub fn test_tmp_store(root: &str) -> (ObjectStore, TempDir) {
let dir = create_temp_dir(root);
- let mut builder = Fs::default();
- let _ = builder.root("/");
-
- (ObjectStore::new(builder).unwrap().finish(), dir)
+ let builder = Fs::default();
+ (ObjectStore::new(builder.root("/")).unwrap().finish(), dir)
}
pub fn test_basic_schema() -> SchemaRef {
diff --git a/src/common/procedure/src/local.rs b/src/common/procedure/src/local.rs
index 54ae88a5cf50..574fb612b246 100644
--- a/src/common/procedure/src/local.rs
+++ b/src/common/procedure/src/local.rs
@@ -680,9 +680,8 @@ pub(crate) mod test_util {
pub(crate) fn new_object_store(dir: &TempDir) -> ObjectStore {
let store_dir = dir.path().to_str().unwrap();
- let mut builder = Builder::default();
- let _ = builder.root(store_dir);
- ObjectStore::new(builder).unwrap().finish()
+ let builder = Builder::default();
+ ObjectStore::new(builder.root(store_dir)).unwrap().finish()
}
}
diff --git a/src/common/procedure/src/store.rs b/src/common/procedure/src/store.rs
index 22e5043d306a..4bc5f8ca814e 100644
--- a/src/common/procedure/src/store.rs
+++ b/src/common/procedure/src/store.rs
@@ -361,8 +361,7 @@ mod tests {
fn procedure_store_for_test(dir: &TempDir) -> ProcedureStore {
let store_dir = dir.path().to_str().unwrap();
- let mut builder = Builder::default();
- let _ = builder.root(store_dir);
+ let builder = Builder::default().root(store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
ProcedureStore::from_object_store(object_store)
diff --git a/src/common/procedure/src/store/state_store.rs b/src/common/procedure/src/store/state_store.rs
index 096ef84b125d..4f119739545f 100644
--- a/src/common/procedure/src/store/state_store.rs
+++ b/src/common/procedure/src/store/state_store.rs
@@ -220,8 +220,7 @@ mod tests {
async fn test_object_state_store() {
let dir = create_temp_dir("state_store");
let store_dir = dir.path().to_str().unwrap();
- let mut builder = Builder::default();
- let _ = builder.root(store_dir);
+ let builder = Builder::default().root(store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let state_store = ObjectStateStore::new(object_store);
@@ -291,8 +290,7 @@ mod tests {
async fn test_object_state_store_delete() {
let dir = create_temp_dir("state_store_list");
let store_dir = dir.path().to_str().unwrap();
- let mut builder = Builder::default();
- let _ = builder.root(store_dir);
+ let builder = Builder::default().root(store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let state_store = ObjectStateStore::new(object_store);
diff --git a/src/datanode/src/store.rs b/src/datanode/src/store.rs
index 3260b2f762ca..877f044974bb 100644
--- a/src/datanode/src/store.rs
+++ b/src/datanode/src/store.rs
@@ -112,11 +112,11 @@ async fn create_object_store_with_cache(
let atomic_temp_dir = join_dir(path, ".tmp/");
clean_temp_dir(&atomic_temp_dir)?;
- let cache_store = {
- let mut builder = Fs::default();
- builder.root(path).atomic_write_dir(&atomic_temp_dir);
- builder.build().context(error::InitBackendSnafu)?
- };
+ let cache_store = Fs::default()
+ .root(path)
+ .atomic_write_dir(&atomic_temp_dir)
+ .build()
+ .context(error::InitBackendSnafu)?;
let cache_layer = LruCacheLayer::new(Arc::new(cache_store), cache_capacity.0 as usize)
.await
diff --git a/src/datanode/src/store/azblob.rs b/src/datanode/src/store/azblob.rs
index 156c2897ec9d..ca7a5023a90e 100644
--- a/src/datanode/src/store/azblob.rs
+++ b/src/datanode/src/store/azblob.rs
@@ -30,8 +30,7 @@ pub(crate) async fn new_azblob_object_store(azblob_config: &AzblobConfig) -> Res
azblob_config.container, &root
);
- let mut builder = Azblob::default();
- let _ = builder
+ let mut builder = Azblob::default()
.root(&root)
.container(&azblob_config.container)
.endpoint(&azblob_config.endpoint)
@@ -40,8 +39,8 @@ pub(crate) async fn new_azblob_object_store(azblob_config: &AzblobConfig) -> Res
.http_client(build_http_client()?);
if let Some(token) = &azblob_config.sas_token {
- let _ = builder.sas_token(token);
- }
+ builder = builder.sas_token(token);
+ };
Ok(ObjectStore::new(builder)
.context(error::InitBackendSnafu)?
diff --git a/src/datanode/src/store/fs.rs b/src/datanode/src/store/fs.rs
index 607598841261..119a5e1bf867 100644
--- a/src/datanode/src/store/fs.rs
+++ b/src/datanode/src/store/fs.rs
@@ -35,8 +35,9 @@ pub(crate) async fn new_fs_object_store(
let atomic_write_dir = join_dir(data_home, ".tmp/");
store::clean_temp_dir(&atomic_write_dir)?;
- let mut builder = Fs::default();
- let _ = builder.root(data_home).atomic_write_dir(&atomic_write_dir);
+ let builder = Fs::default()
+ .root(data_home)
+ .atomic_write_dir(&atomic_write_dir);
let object_store = ObjectStore::new(builder)
.context(error::InitBackendSnafu)?
diff --git a/src/datanode/src/store/gcs.rs b/src/datanode/src/store/gcs.rs
index e0a8c6a315a7..6c68ad3baccc 100644
--- a/src/datanode/src/store/gcs.rs
+++ b/src/datanode/src/store/gcs.rs
@@ -29,8 +29,7 @@ pub(crate) async fn new_gcs_object_store(gcs_config: &GcsConfig) -> Result<Objec
gcs_config.bucket, &root
);
- let mut builder = Gcs::default();
- builder
+ let builder = Gcs::default()
.root(&root)
.bucket(&gcs_config.bucket)
.scope(&gcs_config.scope)
diff --git a/src/datanode/src/store/oss.rs b/src/datanode/src/store/oss.rs
index b807a991970a..45090f443bef 100644
--- a/src/datanode/src/store/oss.rs
+++ b/src/datanode/src/store/oss.rs
@@ -29,8 +29,7 @@ pub(crate) async fn new_oss_object_store(oss_config: &OssConfig) -> Result<Objec
oss_config.bucket, &root
);
- let mut builder = Oss::default();
- let _ = builder
+ let builder = Oss::default()
.root(&root)
.bucket(&oss_config.bucket)
.endpoint(&oss_config.endpoint)
diff --git a/src/datanode/src/store/s3.rs b/src/datanode/src/store/s3.rs
index bf7c0bfe14a8..667378e26e33 100644
--- a/src/datanode/src/store/s3.rs
+++ b/src/datanode/src/store/s3.rs
@@ -30,8 +30,7 @@ pub(crate) async fn new_s3_object_store(s3_config: &S3Config) -> Result<ObjectSt
s3_config.bucket, &root
);
- let mut builder = S3::default();
- let _ = builder
+ let mut builder = S3::default()
.root(&root)
.bucket(&s3_config.bucket)
.access_key_id(s3_config.access_key_id.expose_secret())
@@ -39,11 +38,11 @@ pub(crate) async fn new_s3_object_store(s3_config: &S3Config) -> Result<ObjectSt
.http_client(build_http_client()?);
if s3_config.endpoint.is_some() {
- let _ = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
- }
+ builder = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
+ };
if s3_config.region.is_some() {
- let _ = builder.region(s3_config.region.as_ref().unwrap());
- }
+ builder = builder.region(s3_config.region.as_ref().unwrap());
+ };
Ok(ObjectStore::new(builder)
.context(error::InitBackendSnafu)?
diff --git a/src/file-engine/src/test_util.rs b/src/file-engine/src/test_util.rs
index 4f14c1341a70..7dbdf6b513fb 100644
--- a/src/file-engine/src/test_util.rs
+++ b/src/file-engine/src/test_util.rs
@@ -26,8 +26,7 @@ use store_api::metadata::ColumnMetadata;
pub fn new_test_object_store(prefix: &str) -> (TempDir, ObjectStore) {
let dir = create_temp_dir(prefix);
let store_dir = dir.path().to_string_lossy();
- let mut builder = Fs::default();
- let _ = builder.root(&store_dir);
+ let builder = Fs::default().root(&store_dir);
(dir, ObjectStore::new(builder).unwrap().finish())
}
diff --git a/src/metric-engine/src/test_util.rs b/src/metric-engine/src/test_util.rs
index 71c35b6119cc..c5f7a2b4a32c 100644
--- a/src/metric-engine/src/test_util.rs
+++ b/src/metric-engine/src/test_util.rs
@@ -307,8 +307,7 @@ mod test {
env.init_metric_region().await;
let region_id = to_metadata_region_id(env.default_physical_region_id());
- let mut builder = Fs::default();
- builder.root(&env.data_home());
+ let builder = Fs::default().root(&env.data_home());
let object_store = ObjectStore::new(builder).unwrap().finish();
let region_dir = "test_metric_region";
diff --git a/src/mito2/src/access_layer.rs b/src/mito2/src/access_layer.rs
index a72527fb3351..c8f3de8785d5 100644
--- a/src/mito2/src/access_layer.rs
+++ b/src/mito2/src/access_layer.rs
@@ -212,8 +212,7 @@ pub(crate) async fn new_fs_cache_store(root: &str) -> Result<ObjectStore> {
let atomic_write_dir = join_dir(root, ".tmp/");
clean_dir(&atomic_write_dir).await?;
- let mut builder = Fs::default();
- builder.root(root).atomic_write_dir(&atomic_write_dir);
+ let builder = Fs::default().root(root).atomic_write_dir(&atomic_write_dir);
let store = ObjectStore::new(builder).context(OpenDalSnafu)?.finish();
Ok(with_instrument_layers(store, false))
diff --git a/src/mito2/src/cache/file_cache.rs b/src/mito2/src/cache/file_cache.rs
index b6d667ea045b..6e902490c0dc 100644
--- a/src/mito2/src/cache/file_cache.rs
+++ b/src/mito2/src/cache/file_cache.rs
@@ -382,8 +382,7 @@ mod tests {
use super::*;
fn new_fs_store(path: &str) -> ObjectStore {
- let mut builder = Fs::default();
- builder.root(path);
+ let builder = Fs::default().root(path);
ObjectStore::new(builder).unwrap().finish()
}
diff --git a/src/mito2/src/cache/test_util.rs b/src/mito2/src/cache/test_util.rs
index 306bb50467e8..9c3b08177fa4 100644
--- a/src/mito2/src/cache/test_util.rs
+++ b/src/mito2/src/cache/test_util.rs
@@ -46,7 +46,6 @@ fn parquet_file_data() -> Vec<u8> {
}
pub(crate) fn new_fs_store(path: &str) -> ObjectStore {
- let mut builder = Fs::default();
- builder.root(path);
- ObjectStore::new(builder).unwrap().finish()
+ let builder = Fs::default();
+ ObjectStore::new(builder.root(path)).unwrap().finish()
}
diff --git a/src/mito2/src/manifest/storage.rs b/src/mito2/src/manifest/storage.rs
index d470f2050b89..060a84f49de7 100644
--- a/src/mito2/src/manifest/storage.rs
+++ b/src/mito2/src/manifest/storage.rs
@@ -642,8 +642,7 @@ mod tests {
fn new_test_manifest_store() -> ManifestObjectStore {
common_telemetry::init_default_ut_logging();
let tmp_dir = create_temp_dir("test_manifest_log_store");
- let mut builder = Fs::default();
- let _ = builder.root(&tmp_dir.path().to_string_lossy());
+ let builder = Fs::default().root(&tmp_dir.path().to_string_lossy());
let object_store = ObjectStore::new(builder).unwrap().finish();
ManifestObjectStore::new(
"/",
diff --git a/src/mito2/src/sst/file_purger.rs b/src/mito2/src/sst/file_purger.rs
index 0753b1a3eb76..9e6c6c89e8eb 100644
--- a/src/mito2/src/sst/file_purger.rs
+++ b/src/mito2/src/sst/file_purger.rs
@@ -114,8 +114,7 @@ mod tests {
let dir = create_temp_dir("file-purge");
let dir_path = dir.path().display().to_string();
- let mut builder = Fs::default();
- builder.root(&dir_path);
+ let builder = Fs::default().root(&dir_path);
let sst_file_id = FileId::random();
let sst_dir = "table1";
let path = location::sst_file_path(sst_dir, sst_file_id);
@@ -171,8 +170,7 @@ mod tests {
let dir = create_temp_dir("file-purge");
let dir_path = dir.path().display().to_string();
- let mut builder = Fs::default();
- builder.root(&dir_path);
+ let builder = Fs::default().root(&dir_path);
let sst_file_id = FileId::random();
let sst_dir = "table1";
diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs
index 08c43dd31c84..dcc461ab0991 100644
--- a/src/mito2/src/test_util.rs
+++ b/src/mito2/src/test_util.rs
@@ -365,8 +365,7 @@ impl TestEnv {
.display()
.to_string();
let mut builder = Fs::default();
- builder.root(&data_path);
- let object_store = ObjectStore::new(builder).unwrap().finish();
+ let object_store = ObjectStore::new(builder.root(&data_path)).unwrap().finish();
object_store_manager.add(storage_name, object_store);
}
let object_store_manager = Arc::new(object_store_manager);
@@ -553,8 +552,7 @@ impl TestEnv {
fn create_object_store_manager(&self) -> ObjectStoreManager {
let data_home = self.data_home.path();
let data_path = data_home.join("data").as_path().display().to_string();
- let mut builder = Fs::default();
- builder.root(&data_path);
+ let builder = Fs::default().root(&data_path);
let object_store = ObjectStore::new(builder).unwrap().finish();
ObjectStoreManager::new("default", object_store)
}
@@ -570,9 +568,10 @@ impl TestEnv {
let data_home = self.data_home.path();
let manifest_dir = data_home.join("manifest").as_path().display().to_string();
- let mut builder = Fs::default();
- builder.root(&manifest_dir);
- let object_store = ObjectStore::new(builder).unwrap().finish();
+ let builder = Fs::default();
+ let object_store = ObjectStore::new(builder.root(&manifest_dir))
+ .unwrap()
+ .finish();
// The "manifest_dir" here should be the relative path from the `object_store`'s root.
// Otherwise the OpenDal's list operation would fail with "StripPrefixError". This is
diff --git a/src/mito2/src/test_util/scheduler_util.rs b/src/mito2/src/test_util/scheduler_util.rs
index a6ffe0b2bf97..c1b85279deda 100644
--- a/src/mito2/src/test_util/scheduler_util.rs
+++ b/src/mito2/src/test_util/scheduler_util.rs
@@ -52,8 +52,7 @@ impl SchedulerEnv {
pub(crate) async fn new() -> SchedulerEnv {
let path = create_temp_dir("");
let path_str = path.path().display().to_string();
- let mut builder = Fs::default();
- builder.root(&path_str);
+ let builder = Fs::default().root(&path_str);
let index_aux_path = path.path().join("index_aux");
let puffin_mgr = PuffinManagerFactory::new(&index_aux_path, 4096, None)
diff --git a/src/object-store/Cargo.toml b/src/object-store/Cargo.toml
index 00bb5a93acfd..ca2a3a7ab32f 100644
--- a/src/object-store/Cargo.toml
+++ b/src/object-store/Cargo.toml
@@ -17,7 +17,7 @@ futures.workspace = true
lazy_static.workspace = true
md5 = "0.7"
moka = { workspace = true, features = ["future"] }
-opendal = { version = "0.47", features = [
+opendal = { version = "0.48", features = [
"layers-tracing",
"services-azblob",
"services-fs",
diff --git a/src/object-store/src/layers/lru_cache.rs b/src/object-store/src/layers/lru_cache.rs
index ded6afe58bb6..3fea6945e745 100644
--- a/src/object-store/src/layers/lru_cache.rs
+++ b/src/object-store/src/layers/lru_cache.rs
@@ -25,12 +25,19 @@ use common_telemetry::info;
use read_cache::ReadCache;
/// An opendal layer with local LRU file cache supporting.
-#[derive(Clone)]
pub struct LruCacheLayer<C: Access> {
// The read cache
read_cache: ReadCache<C>,
}
+impl<C: Access> Clone for LruCacheLayer<C> {
+ fn clone(&self) -> Self {
+ Self {
+ read_cache: self.read_cache.clone(),
+ }
+ }
+}
+
impl<C: Access> LruCacheLayer<C> {
/// Create a `[LruCacheLayer]` with local file cache and capacity in bytes.
pub async fn new(file_cache: Arc<C>, capacity: usize) -> Result<Self> {
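
The hand-written `Clone` above is the usual workaround for derive's extra bound: `#[derive(Clone)]` on `LruCacheLayer<C>` would require `C: Clone`, which the `Access` implementations do not provide, even though the only field is itself clonable. A minimal standalone sketch of the pattern, using hypothetical stand-in types:

```rust
use std::sync::Arc;

// Hypothetical stand-ins for `Access` and `ReadCache<C>` from the diff above.
struct ReadCache<C> {
    inner: Arc<C>, // clonable regardless of whether `C: Clone`
}

impl<C> Clone for ReadCache<C> {
    fn clone(&self) -> Self {
        Self { inner: Arc::clone(&self.inner) }
    }
}

struct LruCacheLayer<C> {
    read_cache: ReadCache<C>,
}

// `#[derive(Clone)]` would add a `C: Clone` bound; the manual impl only needs
// the field to be `Clone`, so non-Clone accessors still work.
impl<C> Clone for LruCacheLayer<C> {
    fn clone(&self) -> Self {
        Self { read_cache: self.read_cache.clone() }
    }
}
```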
diff --git a/src/object-store/src/layers/prometheus.rs b/src/object-store/src/layers/prometheus.rs
index 5a2d0b603261..29897db98711 100644
--- a/src/object-store/src/layers/prometheus.rs
+++ b/src/object-store/src/layers/prometheus.rs
@@ -552,11 +552,12 @@ impl<R: oio::BlockingRead> oio::BlockingRead for PrometheusMetricWrapper<R> {
}
impl<R: oio::Write> oio::Write for PrometheusMetricWrapper<R> {
- async fn write(&mut self, bs: Buffer) -> Result<usize> {
+ async fn write(&mut self, bs: Buffer) -> Result<()> {
+ let bytes = bs.len();
match self.inner.write(bs).await {
- Ok(n) => {
- self.bytes += n as u64;
- Ok(n)
+ Ok(_) => {
+ self.bytes += bytes as u64;
+ Ok(())
}
Err(err) => {
increment_errors_total(self.op, err.kind());
@@ -581,12 +582,12 @@ impl<R: oio::Write> oio::Write for PrometheusMetricWrapper<R> {
}
impl<R: oio::BlockingWrite> oio::BlockingWrite for PrometheusMetricWrapper<R> {
- fn write(&mut self, bs: Buffer) -> Result<usize> {
+ fn write(&mut self, bs: Buffer) -> Result<()> {
+ let bytes = bs.len();
self.inner
.write(bs)
- .map(|n| {
- self.bytes += n as u64;
- n
+ .map(|_| {
+ self.bytes += bytes as u64;
})
.map_err(|err| {
increment_errors_total(self.op, err.kind());
diff --git a/src/object-store/src/manager.rs b/src/object-store/src/manager.rs
index fb6d73321967..6513923b52cc 100644
--- a/src/object-store/src/manager.rs
+++ b/src/object-store/src/manager.rs
@@ -61,8 +61,7 @@ mod tests {
fn new_object_store(dir: &TempDir) -> ObjectStore {
let store_dir = dir.path().to_str().unwrap();
- let mut builder = Builder::default();
- let _ = builder.root(store_dir);
+ let builder = Builder::default().root(store_dir);
ObjectStore::new(builder).unwrap().finish()
}
diff --git a/src/object-store/tests/object_store_test.rs b/src/object-store/tests/object_store_test.rs
index b5cedf6e651a..868cce33eefa 100644
--- a/src/object-store/tests/object_store_test.rs
+++ b/src/object-store/tests/object_store_test.rs
@@ -95,8 +95,7 @@ async fn test_object_list(store: &ObjectStore) -> Result<()> {
async fn test_fs_backend() -> Result<()> {
let data_dir = create_temp_dir("test_fs_backend");
let tmp_dir = create_temp_dir("test_fs_backend");
- let mut builder = Fs::default();
- let _ = builder
+ let builder = Fs::default()
.root(&data_dir.path().to_string_lossy())
.atomic_write_dir(&tmp_dir.path().to_string_lossy());
@@ -117,8 +116,7 @@ async fn test_s3_backend() -> Result<()> {
let root = uuid::Uuid::new_v4().to_string();
- let mut builder = S3::default();
- let _ = builder
+ let builder = S3::default()
.root(&root)
.access_key_id(&env::var("GT_S3_ACCESS_KEY_ID")?)
.secret_access_key(&env::var("GT_S3_ACCESS_KEY")?)
@@ -146,8 +144,7 @@ async fn test_oss_backend() -> Result<()> {
let root = uuid::Uuid::new_v4().to_string();
- let mut builder = Oss::default();
- let _ = builder
+ let builder = Oss::default()
.root(&root)
.access_key_id(&env::var("GT_OSS_ACCESS_KEY_ID")?)
.access_key_secret(&env::var("GT_OSS_ACCESS_KEY")?)
@@ -174,8 +171,7 @@ async fn test_azblob_backend() -> Result<()> {
let root = uuid::Uuid::new_v4().to_string();
- let mut builder = Azblob::default();
- let _ = builder
+ let builder = Azblob::default()
.root(&root)
.account_name(&env::var("GT_AZBLOB_ACCOUNT_NAME")?)
.account_key(&env::var("GT_AZBLOB_ACCOUNT_KEY")?)
@@ -199,8 +195,7 @@ async fn test_gcs_backend() -> Result<()> {
if !container.is_empty() {
info!("Running azblob test.");
- let mut builder = Gcs::default();
- builder
+ let builder = Gcs::default()
.root(&uuid::Uuid::new_v4().to_string())
.bucket(&env::var("GT_GCS_BUCKET").unwrap())
.scope(&env::var("GT_GCS_SCOPE").unwrap())
@@ -224,8 +219,7 @@ async fn test_file_backend_with_lru_cache() -> Result<()> {
let data_dir = create_temp_dir("test_file_backend_with_lru_cache");
let tmp_dir = create_temp_dir("test_file_backend_with_lru_cache");
- let mut builder = Fs::default();
- let _ = builder
+ let builder = Fs::default()
.root(&data_dir.path().to_string_lossy())
.atomic_write_dir(&tmp_dir.path().to_string_lossy());
@@ -233,8 +227,7 @@ async fn test_file_backend_with_lru_cache() -> Result<()> {
let cache_dir = create_temp_dir("test_file_backend_with_lru_cache");
let cache_layer = {
- let mut builder = Fs::default();
- let _ = builder
+ let builder = Fs::default()
.root(&cache_dir.path().to_string_lossy())
.atomic_write_dir(&cache_dir.path().to_string_lossy());
let file_cache = Arc::new(builder.build().unwrap());
@@ -307,8 +300,7 @@ async fn test_object_store_cache_policy() -> Result<()> {
// create file cache layer
let cache_dir = create_temp_dir("test_object_store_cache_policy_cache");
let atomic_temp_dir = create_temp_dir("test_object_store_cache_policy_cache_tmp");
- let mut builder = Fs::default();
- let _ = builder
+ let builder = Fs::default()
.root(&cache_dir.path().to_string_lossy())
.atomic_write_dir(&atomic_temp_dir.path().to_string_lossy());
let file_cache = Arc::new(builder.build().unwrap());
diff --git a/src/operator/src/statement/copy_database.rs b/src/operator/src/statement/copy_database.rs
index 64808fa86a04..a4b1b9267b2c 100644
--- a/src/operator/src/statement/copy_database.rs
+++ b/src/operator/src/statement/copy_database.rs
@@ -244,8 +244,7 @@ mod tests {
async fn test_list_files_and_parse_table_name() {
let dir = common_test_util::temp_dir::create_temp_dir("test_list_files_to_copy");
let store_dir = normalize_dir(dir.path().to_str().unwrap());
- let mut builder = Fs::default();
- let _ = builder.root(&store_dir);
+ let builder = Fs::default().root(&store_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
object_store.write("a.parquet", "").await.unwrap();
object_store.write("b.parquet", "").await.unwrap();
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index a1b132dad0c2..cefd8bed6b94 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -164,8 +164,7 @@ pub fn get_test_store_config(store_type: &StorageType) -> (ObjectStoreConfig, Te
..Default::default()
};
- let mut builder = Gcs::default();
- builder
+ let builder = Gcs::default()
.root(&gcs_config.root)
.bucket(&gcs_config.bucket)
.scope(&gcs_config.scope)
@@ -186,8 +185,7 @@ pub fn get_test_store_config(store_type: &StorageType) -> (ObjectStoreConfig, Te
..Default::default()
};
- let mut builder = Azblob::default();
- let _ = builder
+ let mut builder = Azblob::default()
.root(&azblob_config.root)
.endpoint(&azblob_config.endpoint)
.account_name(azblob_config.account_name.expose_secret())
@@ -195,8 +193,8 @@ pub fn get_test_store_config(store_type: &StorageType) -> (ObjectStoreConfig, Te
.container(&azblob_config.container);
if let Ok(sas_token) = env::var("GT_AZBLOB_SAS_TOKEN") {
- let _ = builder.sas_token(&sas_token);
- }
+ builder = builder.sas_token(&sas_token);
+ };
let config = ObjectStoreConfig::Azblob(azblob_config);
@@ -214,8 +212,7 @@ pub fn get_test_store_config(store_type: &StorageType) -> (ObjectStoreConfig, Te
..Default::default()
};
- let mut builder = Oss::default();
- let _ = builder
+ let builder = Oss::default()
.root(&oss_config.root)
.endpoint(&oss_config.endpoint)
.access_key_id(oss_config.access_key_id.expose_secret())
@@ -235,19 +232,18 @@ pub fn get_test_store_config(store_type: &StorageType) -> (ObjectStoreConfig, Te
s3_config.cache.cache_path = Some("/tmp/greptimedb_cache".to_string());
}
- let mut builder = S3::default();
- let _ = builder
+ let mut builder = S3::default()
.root(&s3_config.root)
.access_key_id(s3_config.access_key_id.expose_secret())
.secret_access_key(s3_config.secret_access_key.expose_secret())
.bucket(&s3_config.bucket);
if s3_config.endpoint.is_some() {
- let _ = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
- }
+ builder = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
+ };
if s3_config.region.is_some() {
- let _ = builder.region(s3_config.region.as_ref().unwrap());
- }
+ builder = builder.region(s3_config.region.as_ref().unwrap());
+ };
let config = ObjectStoreConfig::S3(s3_config);
| chore | bump opendal version to 0.48 (#4499) |
| f86390345ca599da13c2e848a4686d0e3218e1e9 | 2023-05-08 19:23:45 | localhost | chore: remove useless Option type in plugins (#1544) | false | null | chore | remove useless Option type in plugins (#1544) |
| aa4d10eef756cac2adabeb87a3bc20f50844c045 | 2024-07-08 18:06:59 | Lei, HUANG | feat(inverted_index): inverted index cache (#4309) | false |
diff --git a/Cargo.lock b/Cargo.lock
index 098968da0dfb..580db0079e78 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -738,9 +738,12 @@ dependencies = [
[[package]]
name = "atomic"
-version = "0.5.3"
+version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba"
+checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994"
+dependencies = [
+ "bytemuck",
+]
[[package]]
name = "atomic-waker"
@@ -4993,6 +4996,7 @@ dependencies = [
"tempfile",
"tokio",
"tokio-util",
+ "uuid",
]
[[package]]
@@ -12896,9 +12900,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
[[package]]
name = "uuid"
-version = "1.8.0"
+version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0"
+checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439"
dependencies = [
"atomic",
"getrandom",
@@ -12909,9 +12913,9 @@ dependencies = [
[[package]]
name = "uuid-macro-internal"
-version = "1.8.0"
+version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9881bea7cbe687e36c9ab3b778c36cd0487402e270304e8b1296d5085303c1a2"
+checksum = "a3ff64d5cde1e2cb5268bdb497235b6bd255ba8244f910dbc3574e59593de68c"
dependencies = [
"proc-macro2",
"quote",
diff --git a/src/index/Cargo.toml b/src/index/Cargo.toml
index c1e76e4488a2..8e2c05c6ad05 100644
--- a/src/index/Cargo.toml
+++ b/src/index/Cargo.toml
@@ -29,6 +29,7 @@ snafu.workspace = true
tantivy = { version = "0.22", features = ["zstd-compression"] }
tantivy-jieba = "0.11.0"
tokio.workspace = true
+uuid.workspace = true
[dev-dependencies]
common-test-util.workspace = true
diff --git a/src/index/src/inverted_index/format/reader.rs b/src/index/src/inverted_index/format/reader.rs
index 683a56561663..a6fb0cecbfcd 100644
--- a/src/index/src/inverted_index/format/reader.rs
+++ b/src/index/src/inverted_index/format/reader.rs
@@ -12,27 +12,41 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-mod blob;
-mod footer;
+use std::sync::Arc;
use async_trait::async_trait;
use common_base::BitVec;
use greptime_proto::v1::index::InvertedIndexMetas;
+use snafu::ResultExt;
-use crate::inverted_index::error::Result;
+use crate::inverted_index::error::{DecodeFstSnafu, Result};
pub use crate::inverted_index::format::reader::blob::InvertedIndexBlobReader;
use crate::inverted_index::FstMap;
+mod blob;
+mod footer;
+
/// InvertedIndexReader defines an asynchronous reader of inverted index data
#[mockall::automock]
#[async_trait]
pub trait InvertedIndexReader: Send {
- /// Retrieve metadata of all inverted indices stored within the blob.
- async fn metadata(&mut self) -> Result<InvertedIndexMetas>;
+ /// Reads all data to dest.
+ async fn read_all(&mut self, dest: &mut Vec<u8>) -> Result<usize>;
+
+ /// Seeks to given offset and reads data with exact size as provided.
+ async fn seek_read(&mut self, offset: u64, size: u32) -> Result<Vec<u8>>;
+
+ /// Retrieves metadata of all inverted indices stored within the blob.
+ async fn metadata(&mut self) -> Result<Arc<InvertedIndexMetas>>;
- /// Retrieve the finite state transducer (FST) map from the given offset and size.
- async fn fst(&mut self, offset: u64, size: u32) -> Result<FstMap>;
+ /// Retrieves the finite state transducer (FST) map from the given offset and size.
+ async fn fst(&mut self, offset: u64, size: u32) -> Result<FstMap> {
+ let fst_data = self.seek_read(offset, size).await?;
+ FstMap::new(fst_data).context(DecodeFstSnafu)
+ }
- /// Retrieve the bitmap from the given offset and size.
- async fn bitmap(&mut self, offset: u64, size: u32) -> Result<BitVec>;
+ /// Retrieves the bitmap from the given offset and size.
+ async fn bitmap(&mut self, offset: u64, size: u32) -> Result<BitVec> {
+ self.seek_read(offset, size).await.map(BitVec::from_vec)
+ }
}
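
The reshaped trait keeps `read_all` and `seek_read` as the only required methods and turns `fst`/`bitmap` into provided methods layered on `seek_read`, so a caching wrapper only has to intercept the raw reads. A simplified, synchronous sketch of that layering with hypothetical names (the real trait is async via `#[async_trait]` and uses the crate's own error type):

```rust
// Simplified, synchronous sketch of the provided-method layering.
trait IndexReader {
    /// Required primitive: read `size` bytes starting at `offset`.
    fn seek_read(&mut self, offset: u64, size: u32) -> std::io::Result<Vec<u8>>;

    /// Provided method built on the primitive; wrappers that only
    /// override `seek_read` (e.g. to add a cache) get this for free.
    fn bitmap(&mut self, offset: u64, size: u32) -> std::io::Result<Vec<u8>> {
        self.seek_read(offset, size)
    }
}

struct SliceReader<'a>(&'a [u8]);

impl IndexReader for SliceReader<'_> {
    fn seek_read(&mut self, offset: u64, size: u32) -> std::io::Result<Vec<u8>> {
        let start = offset as usize;
        let end = start + size as usize;
        Ok(self.0[start..end].to_vec())
    }
}
```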
diff --git a/src/index/src/inverted_index/format/reader/blob.rs b/src/index/src/inverted_index/format/reader/blob.rs
index 99f2f93239a3..3a6274f5f90b 100644
--- a/src/index/src/inverted_index/format/reader/blob.rs
+++ b/src/index/src/inverted_index/format/reader/blob.rs
@@ -13,18 +13,16 @@
// limitations under the License.
use std::io::SeekFrom;
+use std::sync::Arc;
use async_trait::async_trait;
-use common_base::BitVec;
use futures::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt};
use greptime_proto::v1::index::InvertedIndexMetas;
use snafu::{ensure, ResultExt};
-use crate::inverted_index::error::{
- DecodeFstSnafu, ReadSnafu, Result, SeekSnafu, UnexpectedBlobSizeSnafu,
-};
+use crate::inverted_index::error::{ReadSnafu, Result, SeekSnafu, UnexpectedBlobSizeSnafu};
use crate::inverted_index::format::reader::footer::InvertedIndeFooterReader;
-use crate::inverted_index::format::reader::{FstMap, InvertedIndexReader};
+use crate::inverted_index::format::reader::InvertedIndexReader;
use crate::inverted_index::format::MIN_BLOB_SIZE;
/// Inverted index blob reader, implements [`InvertedIndexReader`]
@@ -52,35 +50,31 @@ impl<R> InvertedIndexBlobReader<R> {
#[async_trait]
impl<R: AsyncRead + AsyncSeek + Unpin + Send> InvertedIndexReader for InvertedIndexBlobReader<R> {
- async fn metadata(&mut self) -> Result<InvertedIndexMetas> {
- let end = SeekFrom::End(0);
- let blob_size = self.source.seek(end).await.context(SeekSnafu)?;
- Self::validate_blob_size(blob_size)?;
-
- let mut footer_reader = InvertedIndeFooterReader::new(&mut self.source, blob_size);
- footer_reader.metadata().await
- }
-
- async fn fst(&mut self, offset: u64, size: u32) -> Result<FstMap> {
+ async fn read_all(&mut self, dest: &mut Vec<u8>) -> Result<usize> {
self.source
- .seek(SeekFrom::Start(offset))
+ .seek(SeekFrom::Start(0))
.await
.context(SeekSnafu)?;
- let mut buf = vec![0u8; size as usize];
- self.source.read_exact(&mut buf).await.context(ReadSnafu)?;
-
- FstMap::new(buf).context(DecodeFstSnafu)
+ self.source.read_to_end(dest).await.context(ReadSnafu)
}
- async fn bitmap(&mut self, offset: u64, size: u32) -> Result<BitVec> {
+ async fn seek_read(&mut self, offset: u64, size: u32) -> Result<Vec<u8>> {
self.source
.seek(SeekFrom::Start(offset))
.await
.context(SeekSnafu)?;
let mut buf = vec![0u8; size as usize];
- self.source.read_exact(&mut buf).await.context(ReadSnafu)?;
+ self.source.read(&mut buf).await.context(ReadSnafu)?;
+ Ok(buf)
+ }
+
+ async fn metadata(&mut self) -> Result<Arc<InvertedIndexMetas>> {
+ let end = SeekFrom::End(0);
+ let blob_size = self.source.seek(end).await.context(SeekSnafu)?;
+ Self::validate_blob_size(blob_size)?;
- Ok(BitVec::from_vec(buf))
+ let mut footer_reader = InvertedIndeFooterReader::new(&mut self.source, blob_size);
+ footer_reader.metadata().await.map(Arc::new)
}
}
diff --git a/src/index/src/inverted_index/search/index_apply/predicates_apply.rs b/src/index/src/inverted_index/search/index_apply/predicates_apply.rs
index 85928d9183c2..3b21e21dc194 100644
--- a/src/index/src/inverted_index/search/index_apply/predicates_apply.rs
+++ b/src/index/src/inverted_index/search/index_apply/predicates_apply.rs
@@ -148,6 +148,8 @@ impl TryFrom<Vec<(String, Vec<Predicate>)>> for PredicatesIndexApplier {
#[cfg(test)]
mod tests {
+ use std::sync::Arc;
+
use common_base::bit_vec::prelude::*;
use greptime_proto::v1::index::InvertedIndexMeta;
@@ -161,7 +163,7 @@ mod tests {
s.to_owned()
}
- fn mock_metas(tags: impl IntoIterator<Item = (&'static str, u32)>) -> InvertedIndexMetas {
+ fn mock_metas(tags: impl IntoIterator<Item = (&'static str, u32)>) -> Arc<InvertedIndexMetas> {
let mut metas = InvertedIndexMetas {
total_row_count: 8,
segment_row_count: 1,
@@ -175,7 +177,7 @@ mod tests {
};
metas.metas.insert(s(tag), meta);
}
- metas
+ Arc::new(metas)
}
fn key_fst_applier(value: &'static str) -> Box<dyn FstApplier> {
@@ -300,11 +302,11 @@ mod tests {
async fn test_index_applier_with_empty_index() {
let mut mock_reader = MockInvertedIndexReader::new();
mock_reader.expect_metadata().returning(move || {
- Ok(InvertedIndexMetas {
+ Ok(Arc::new(InvertedIndexMetas {
total_row_count: 0, // No rows
segment_row_count: 1,
..Default::default()
- })
+ }))
});
let mut mock_fst_applier = MockFstApplier::new();
diff --git a/src/mito2/src/cache.rs b/src/mito2/src/cache.rs
index 892ee4fb9274..c36bcdbb83fb 100644
--- a/src/mito2/src/cache.rs
+++ b/src/mito2/src/cache.rs
@@ -17,6 +17,7 @@
mod cache_size;
pub(crate) mod file_cache;
+pub(crate) mod index;
#[cfg(test)]
pub(crate) mod test_util;
pub(crate) mod write_cache;
@@ -33,6 +34,7 @@ use store_api::storage::{ConcreteDataType, RegionId};
use crate::cache::cache_size::parquet_meta_size;
use crate::cache::file_cache::{FileType, IndexKey};
+use crate::cache::index::{InvertedIndexCache, InvertedIndexCacheRef};
use crate::cache::write_cache::WriteCacheRef;
use crate::metrics::{CACHE_BYTES, CACHE_HIT, CACHE_MISS};
use crate::sst::file::FileId;
@@ -59,6 +61,8 @@ pub struct CacheManager {
page_cache: Option<PageCache>,
/// A Cache for writing files to object stores.
write_cache: Option<WriteCacheRef>,
+ /// Cache for inverted index.
+ index_cache: Option<InvertedIndexCacheRef>,
}
pub type CacheManagerRef = Arc<CacheManager>;
@@ -167,6 +171,10 @@ impl CacheManager {
pub(crate) fn write_cache(&self) -> Option<&WriteCacheRef> {
self.write_cache.as_ref()
}
+
+ pub(crate) fn index_cache(&self) -> Option<&InvertedIndexCacheRef> {
+ self.index_cache.as_ref()
+ }
}
/// Builder to construct a [CacheManager].
@@ -175,6 +183,8 @@ pub struct CacheManagerBuilder {
sst_meta_cache_size: u64,
vector_cache_size: u64,
page_cache_size: u64,
+ index_metadata_size: u64,
+ index_content_size: u64,
write_cache: Option<WriteCacheRef>,
}
@@ -203,6 +213,18 @@ impl CacheManagerBuilder {
self
}
+ /// Sets cache size for index metadata.
+ pub fn index_metadata_size(mut self, bytes: u64) -> Self {
+ self.index_metadata_size = bytes;
+ self
+ }
+
+ /// Sets cache size for index content.
+ pub fn index_content_size(mut self, bytes: u64) -> Self {
+ self.index_content_size = bytes;
+ self
+ }
+
/// Builds the [CacheManager].
pub fn build(self) -> CacheManager {
let sst_meta_cache = (self.sst_meta_cache_size != 0).then(|| {
@@ -240,11 +262,14 @@ impl CacheManagerBuilder {
.build()
});
+ let inverted_index_cache =
+ InvertedIndexCache::new(self.index_metadata_size, self.index_content_size);
CacheManager {
sst_meta_cache,
vector_cache,
page_cache,
write_cache: self.write_cache,
+ index_cache: Some(Arc::new(inverted_index_cache)),
}
}
}
diff --git a/src/mito2/src/cache/index.rs b/src/mito2/src/cache/index.rs
new file mode 100644
index 000000000000..4e6e4deee260
--- /dev/null
+++ b/src/mito2/src/cache/index.rs
@@ -0,0 +1,211 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use api::v1::index::InvertedIndexMetas;
+use async_trait::async_trait;
+use common_base::BitVec;
+use index::inverted_index::error::DecodeFstSnafu;
+use index::inverted_index::format::reader::InvertedIndexReader;
+use index::inverted_index::FstMap;
+use prost::Message;
+use snafu::ResultExt;
+
+use crate::metrics::{CACHE_BYTES, CACHE_HIT, CACHE_MISS};
+use crate::sst::file::FileId;
+
+/// Metrics for index metadata.
+const INDEX_METADATA_TYPE: &str = "index_metadata";
+/// Metrics for index content.
+const INDEX_CONTENT_TYPE: &str = "index_content";
+
+/// Inverted index blob reader with cache.
+pub struct CachedInvertedIndexBlobReader<R> {
+ file_id: FileId,
+ inner: R,
+ cache: InvertedIndexCacheRef,
+}
+
+impl<R> CachedInvertedIndexBlobReader<R> {
+ pub fn new(file_id: FileId, inner: R, cache: InvertedIndexCacheRef) -> Self {
+ Self {
+ file_id,
+ inner,
+ cache,
+ }
+ }
+}
+
+impl<R> CachedInvertedIndexBlobReader<R>
+where
+ R: InvertedIndexReader,
+{
+ /// Gets given range of index data from cache, and loads from source if the file
+ /// is not already cached.
+ async fn get_or_load(
+ &mut self,
+ offset: u64,
+ size: u32,
+ ) -> index::inverted_index::error::Result<Vec<u8>> {
+ let range = offset as usize..(offset + size as u64) as usize;
+ if let Some(cached) = self.cache.get_index(IndexKey {
+ file_id: self.file_id,
+ }) {
+ CACHE_HIT.with_label_values(&[INDEX_CONTENT_TYPE]).inc();
+ Ok(cached[range].to_vec())
+ } else {
+ let mut all_data = Vec::with_capacity(1024 * 1024);
+ self.inner.read_all(&mut all_data).await?;
+ let result = all_data[range].to_vec();
+ self.cache.put_index(
+ IndexKey {
+ file_id: self.file_id,
+ },
+ Arc::new(all_data),
+ );
+ CACHE_MISS.with_label_values(&[INDEX_CONTENT_TYPE]).inc();
+ Ok(result)
+ }
+ }
+}
+
+#[async_trait]
+impl<R: InvertedIndexReader> InvertedIndexReader for CachedInvertedIndexBlobReader<R> {
+ async fn read_all(
+ &mut self,
+ dest: &mut Vec<u8>,
+ ) -> index::inverted_index::error::Result<usize> {
+ self.inner.read_all(dest).await
+ }
+
+ async fn seek_read(
+ &mut self,
+ offset: u64,
+ size: u32,
+ ) -> index::inverted_index::error::Result<Vec<u8>> {
+ self.inner.seek_read(offset, size).await
+ }
+
+ async fn metadata(&mut self) -> index::inverted_index::error::Result<Arc<InvertedIndexMetas>> {
+ if let Some(cached) = self.cache.get_index_metadata(self.file_id) {
+ CACHE_HIT.with_label_values(&[INDEX_METADATA_TYPE]).inc();
+ Ok(cached)
+ } else {
+ let meta = self.inner.metadata().await?;
+ self.cache.put_index_metadata(self.file_id, meta.clone());
+ CACHE_MISS.with_label_values(&[INDEX_METADATA_TYPE]).inc();
+ Ok(meta)
+ }
+ }
+
+ async fn fst(
+ &mut self,
+ offset: u64,
+ size: u32,
+ ) -> index::inverted_index::error::Result<FstMap> {
+ self.get_or_load(offset, size)
+ .await
+ .and_then(|r| FstMap::new(r).context(DecodeFstSnafu))
+ }
+
+ async fn bitmap(
+ &mut self,
+ offset: u64,
+ size: u32,
+ ) -> index::inverted_index::error::Result<BitVec> {
+ self.get_or_load(offset, size).await.map(BitVec::from_vec)
+ }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct IndexKey {
+ file_id: FileId,
+}
+
+pub type InvertedIndexCacheRef = Arc<InvertedIndexCache>;
+
+pub struct InvertedIndexCache {
+ /// Cache for inverted index metadata
+ index_metadata: moka::sync::Cache<IndexKey, Arc<InvertedIndexMetas>>,
+ /// Cache for inverted index content.
+ index: moka::sync::Cache<IndexKey, Arc<Vec<u8>>>,
+}
+
+impl InvertedIndexCache {
+ /// Creates `InvertedIndexCache` with provided `index_metadata_cap` and `index_content_cap`.
+ pub fn new(index_metadata_cap: u64, index_content_cap: u64) -> Self {
+ common_telemetry::debug!("Building InvertedIndexCache with metadata size: {index_metadata_cap}, content size: {index_content_cap}");
+ let index_metadata = moka::sync::CacheBuilder::new(index_metadata_cap)
+ .name("inverted_index_metadata")
+ .weigher(index_metadata_weight)
+ .eviction_listener(|k, v, _cause| {
+ let size = index_metadata_weight(&k, &v);
+ CACHE_BYTES
+ .with_label_values(&[INDEX_METADATA_TYPE])
+ .sub(size.into());
+ })
+ .build();
+ let index_cache = moka::sync::CacheBuilder::new(index_content_cap)
+ .name("inverted_index_content")
+ .weigher(index_content_weight)
+ .eviction_listener(|k, v, _cause| {
+ let size = index_content_weight(&k, &v);
+ CACHE_BYTES
+ .with_label_values(&[INDEX_CONTENT_TYPE])
+ .sub(size.into());
+ })
+ .build();
+ Self {
+ index_metadata,
+ index: index_cache,
+ }
+ }
+}
+
+impl InvertedIndexCache {
+ pub fn get_index_metadata(&self, file_id: FileId) -> Option<Arc<InvertedIndexMetas>> {
+ self.index_metadata.get(&IndexKey { file_id })
+ }
+
+ pub fn put_index_metadata(&self, file_id: FileId, metadata: Arc<InvertedIndexMetas>) {
+ let key = IndexKey { file_id };
+ CACHE_BYTES
+ .with_label_values(&[INDEX_METADATA_TYPE])
+ .add(index_metadata_weight(&key, &metadata).into());
+ self.index_metadata.insert(key, metadata)
+ }
+
+ // todo(hl): align index file content to pages with size like 4096 bytes.
+ pub fn get_index(&self, key: IndexKey) -> Option<Arc<Vec<u8>>> {
+ self.index.get(&key)
+ }
+
+ pub fn put_index(&self, key: IndexKey, value: Arc<Vec<u8>>) {
+ CACHE_BYTES
+ .with_label_values(&[INDEX_CONTENT_TYPE])
+ .add(index_content_weight(&key, &value).into());
+ self.index.insert(key, value);
+ }
+}
+
+/// Calculates weight for index metadata.
+fn index_metadata_weight(k: &IndexKey, v: &Arc<InvertedIndexMetas>) -> u32 {
+ (k.file_id.as_bytes().len() + v.encoded_len()) as u32
+}
+
+/// Calculates weight for index content.
+fn index_content_weight(k: &IndexKey, v: &Arc<Vec<u8>>) -> u32 {
+ (k.file_id.as_bytes().len() + v.len()) as u32
+}
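
The new cache relies on moka's size-aware eviction: the `weigher` closure reports each entry's weight in bytes and the `eviction_listener` is where the `CACHE_BYTES` gauge is decremented when entries fall out. A minimal standalone sketch of that pattern with plain types (assumes the `moka` crate with its sync cache available; names and sizes here are illustrative):

```rust
use moka::sync::CacheBuilder;

fn main() {
    // Size-aware cache: the capacity is interpreted in the same unit the
    // weigher returns (bytes here), not in number of entries.
    let cache: moka::sync::Cache<String, Vec<u8>> = CacheBuilder::new(64 * 1024)
        .weigher(|key: &String, value: &Vec<u8>| (key.len() + value.len()) as u32)
        .eviction_listener(|key, value: Vec<u8>, cause| {
            // The real code subtracts the evicted weight from a metrics gauge here.
            println!("evicted {key} ({} bytes): {cause:?}", value.len());
        })
        .build();

    cache.insert("blob-a".to_string(), vec![0u8; 16 * 1024]);
    assert!(cache.get("blob-a").is_some());
}
```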
diff --git a/src/mito2/src/config.rs b/src/mito2/src/config.rs
index ca44342a6d4c..7919aeb4ca5e 100644
--- a/src/mito2/src/config.rs
+++ b/src/mito2/src/config.rs
@@ -381,6 +381,11 @@ pub struct InvertedIndexConfig {
#[deprecated = "use [IndexConfig::write_buffer_size] instead"]
#[serde(skip_serializing)]
pub write_buffer_size: ReadableSize,
+
+ /// Cache size for metadata of inverted index. Setting it to 0 to disable the cache.
+ pub metadata_cache_size: ReadableSize,
+ /// Cache size for inverted index content. Setting it to 0 to disable the cache.
+ pub content_cache_size: ReadableSize,
}
impl Default for InvertedIndexConfig {
@@ -392,9 +397,10 @@ impl Default for InvertedIndexConfig {
apply_on_query: Mode::Auto,
mem_threshold_on_create: MemoryThreshold::Auto,
compress: true,
-
write_buffer_size: ReadableSize::mb(8),
intermediate_path: String::new(),
+ metadata_cache_size: ReadableSize::mb(32),
+ content_cache_size: ReadableSize::mb(32),
}
}
}
diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs
index ce4f789b9cf4..d5c128c6bf5d 100644
--- a/src/mito2/src/read/scan_region.rs
+++ b/src/mito2/src/read/scan_region.rs
@@ -330,10 +330,17 @@ impl ScanRegion {
Some(file_cache)
}();
+ let index_cache = self
+ .cache_manager
+ .as_ref()
+ .and_then(|c| c.index_cache())
+ .cloned();
+
SstIndexApplierBuilder::new(
self.access_layer.region_dir().to_string(),
self.access_layer.object_store().clone(),
file_cache,
+ index_cache,
self.version.metadata.as_ref(),
self.version
.options
diff --git a/src/mito2/src/sst/file.rs b/src/mito2/src/sst/file.rs
index 61e6a5537691..4852a3f32049 100644
--- a/src/mito2/src/sst/file.rs
+++ b/src/mito2/src/sst/file.rs
@@ -63,6 +63,17 @@ impl FileId {
pub fn as_puffin(&self) -> String {
format!("{}{}", self, ".puffin")
}
+
+ /// Converts [FileId] as byte slice.
+ pub fn as_bytes(&self) -> &[u8] {
+ self.0.as_bytes()
+ }
+}
+
+impl From<FileId> for Uuid {
+ fn from(value: FileId) -> Self {
+ value.0
+ }
}
impl fmt::Display for FileId {
diff --git a/src/mito2/src/sst/index/inverted_index/applier.rs b/src/mito2/src/sst/index/inverted_index/applier.rs
index 7463f6011fca..566af65d2213 100644
--- a/src/mito2/src/sst/index/inverted_index/applier.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier.rs
@@ -27,6 +27,7 @@ use snafu::ResultExt;
use store_api::storage::RegionId;
use crate::cache::file_cache::{FileCacheRef, FileType, IndexKey};
+use crate::cache::index::{CachedInvertedIndexBlobReader, InvertedIndexCacheRef};
use crate::error::{ApplyIndexSnafu, PuffinBuildReaderSnafu, PuffinReadBlobSnafu, Result};
use crate::metrics::{INDEX_APPLY_ELAPSED, INDEX_APPLY_MEMORY_USAGE};
use crate::sst::file::FileId;
@@ -55,6 +56,9 @@ pub(crate) struct SstIndexApplier {
/// The puffin manager factory.
puffin_manager_factory: PuffinManagerFactory,
+
+ /// In-memory cache for inverted index.
+ inverted_index_cache: Option<InvertedIndexCacheRef>,
}
pub(crate) type SstIndexApplierRef = Arc<SstIndexApplier>;
@@ -66,6 +70,7 @@ impl SstIndexApplier {
region_id: RegionId,
store: ObjectStore,
file_cache: Option<FileCacheRef>,
+ index_cache: Option<InvertedIndexCacheRef>,
index_applier: Box<dyn IndexApplier>,
puffin_manager_factory: PuffinManagerFactory,
) -> Self {
@@ -78,6 +83,7 @@ impl SstIndexApplier {
file_cache,
index_applier,
puffin_manager_factory,
+ inverted_index_cache: index_cache,
}
}
@@ -99,13 +105,24 @@ impl SstIndexApplier {
self.remote_blob_reader(file_id).await?
}
};
- let mut blob_reader = InvertedIndexBlobReader::new(blob);
- let output = self
- .index_applier
- .apply(context, &mut blob_reader)
- .await
- .context(ApplyIndexSnafu)?;
- Ok(output)
+
+ if let Some(index_cache) = &self.inverted_index_cache {
+ let mut index_reader = CachedInvertedIndexBlobReader::new(
+ file_id,
+ InvertedIndexBlobReader::new(blob),
+ index_cache.clone(),
+ );
+ self.index_applier
+ .apply(context, &mut index_reader)
+ .await
+ .context(ApplyIndexSnafu)
+ } else {
+ let mut index_reader = InvertedIndexBlobReader::new(blob);
+ self.index_applier
+ .apply(context, &mut index_reader)
+ .await
+ .context(ApplyIndexSnafu)
+ }
}
/// Creates a blob reader from the cached index file.
@@ -200,6 +217,7 @@ mod tests {
RegionId::new(0, 0),
object_store,
None,
+ None,
Box::new(mock_index_applier),
puffin_manager_factory,
);
@@ -241,6 +259,7 @@ mod tests {
RegionId::new(0, 0),
object_store,
None,
+ None,
Box::new(mock_index_applier),
puffin_manager_factory,
);
diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder.rs b/src/mito2/src/sst/index/inverted_index/applier/builder.rs
index 3dcb5c0ec8a3..0736b07fb6b9 100644
--- a/src/mito2/src/sst/index/inverted_index/applier/builder.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier/builder.rs
@@ -34,6 +34,7 @@ use store_api::metadata::RegionMetadata;
use store_api::storage::ColumnId;
use crate::cache::file_cache::FileCacheRef;
+use crate::cache::index::InvertedIndexCacheRef;
use crate::error::{BuildIndexApplierSnafu, ColumnNotFoundSnafu, ConvertValueSnafu, Result};
use crate::row_converter::SortField;
use crate::sst::index::inverted_index::applier::SstIndexApplier;
@@ -62,6 +63,9 @@ pub(crate) struct SstIndexApplierBuilder<'a> {
/// The puffin manager factory.
puffin_manager_factory: PuffinManagerFactory,
+
+ /// Cache for inverted index.
+ index_cache: Option<InvertedIndexCacheRef>,
}
impl<'a> SstIndexApplierBuilder<'a> {
@@ -70,6 +74,7 @@ impl<'a> SstIndexApplierBuilder<'a> {
region_dir: String,
object_store: ObjectStore,
file_cache: Option<FileCacheRef>,
+ index_cache: Option<InvertedIndexCacheRef>,
metadata: &'a RegionMetadata,
ignore_column_ids: HashSet<ColumnId>,
puffin_manager_factory: PuffinManagerFactory,
@@ -81,6 +86,7 @@ impl<'a> SstIndexApplierBuilder<'a> {
metadata,
ignore_column_ids,
output: HashMap::default(),
+ index_cache,
puffin_manager_factory,
}
}
@@ -102,11 +108,13 @@ impl<'a> SstIndexApplierBuilder<'a> {
.map(|(column_id, predicates)| (column_id.to_string(), predicates))
.collect();
let applier = PredicatesIndexApplier::try_from(predicates);
+
Ok(Some(SstIndexApplier::new(
self.region_dir,
self.metadata.region_id,
self.object_store,
self.file_cache,
+ self.index_cache,
Box::new(applier.context(BuildIndexApplierSnafu)?),
self.puffin_manager_factory,
)))
@@ -320,6 +328,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder/between.rs b/src/mito2/src/sst/index/inverted_index/applier/builder/between.rs
index c35736d42bad..c7f1f90cf0ce 100644
--- a/src/mito2/src/sst/index/inverted_index/applier/builder/between.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier/builder/between.rs
@@ -76,6 +76,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -118,6 +119,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -143,6 +145,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -168,6 +171,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -194,6 +198,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder/comparison.rs b/src/mito2/src/sst/index/inverted_index/applier/builder/comparison.rs
index 450e39ad7aee..b7870039115d 100644
--- a/src/mito2/src/sst/index/inverted_index/applier/builder/comparison.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier/builder/comparison.rs
@@ -232,6 +232,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -260,6 +261,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -279,6 +281,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -299,6 +302,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder/eq_list.rs b/src/mito2/src/sst/index/inverted_index/applier/builder/eq_list.rs
index 24f677db1d78..c776cdb74361 100644
--- a/src/mito2/src/sst/index/inverted_index/applier/builder/eq_list.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier/builder/eq_list.rs
@@ -138,6 +138,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -175,6 +176,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -195,6 +197,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -214,6 +217,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -233,6 +237,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -291,6 +296,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -328,6 +334,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder/in_list.rs b/src/mito2/src/sst/index/inverted_index/applier/builder/in_list.rs
index 146b58aeec04..1f23d3fa0cee 100644
--- a/src/mito2/src/sst/index/inverted_index/applier/builder/in_list.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier/builder/in_list.rs
@@ -69,6 +69,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -101,6 +102,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -125,6 +127,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -149,6 +152,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -175,6 +179,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder/regex_match.rs b/src/mito2/src/sst/index/inverted_index/applier/builder/regex_match.rs
index 3c2122f4c028..ae19b5ef7ce8 100644
--- a/src/mito2/src/sst/index/inverted_index/applier/builder/regex_match.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier/builder/regex_match.rs
@@ -63,6 +63,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -91,6 +92,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -112,6 +114,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
@@ -133,6 +136,7 @@ mod tests {
"test".to_string(),
test_object_store(),
None,
+ None,
&metadata,
HashSet::default(),
facotry,
diff --git a/src/mito2/src/sst/index/inverted_index/creator.rs b/src/mito2/src/sst/index/inverted_index/creator.rs
index 00aafad1595d..380661d60db9 100644
--- a/src/mito2/src/sst/index/inverted_index/creator.rs
+++ b/src/mito2/src/sst/index/inverted_index/creator.rs
@@ -304,6 +304,7 @@ mod tests {
use store_api::storage::RegionId;
use super::*;
+ use crate::cache::index::InvertedIndexCache;
use crate::row_converter::{McmpRowCodec, RowCodec, SortField};
use crate::sst::index::inverted_index::applier::builder::SstIndexApplierBuilder;
use crate::sst::index::puffin_manager::PuffinManagerFactory;
@@ -414,10 +415,12 @@ mod tests {
move |expr| {
let _d = &d;
+ let cache = Arc::new(InvertedIndexCache::new(10, 10));
let applier = SstIndexApplierBuilder::new(
region_dir.clone(),
object_store.clone(),
None,
+ Some(cache),
®ion_metadata,
Default::default(),
factory.clone(),
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index 2ffcc65fbe46..617950cd063b 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -158,6 +158,8 @@ impl WorkerGroup {
.sst_meta_cache_size(config.sst_meta_cache_size.as_bytes())
.vector_cache_size(config.vector_cache_size.as_bytes())
.page_cache_size(config.page_cache_size.as_bytes())
+ .index_metadata_size(config.inverted_index.metadata_cache_size.as_bytes())
+ .index_content_size(config.inverted_index.content_cache_size.as_bytes())
.write_cache(write_cache)
.build(),
);
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 7cebae847e26..96d9316f5549 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -839,6 +839,8 @@ create_on_compaction = "auto"
apply_on_query = "auto"
mem_threshold_on_create = "auto"
compress = true
+metadata_cache_size = "32MiB"
+content_cache_size = "32MiB"
[region_engine.mito.fulltext_index]
create_on_flush = "auto"
| feat | inverted index cache (#4309) |
| 4278c858f376497e1517b9aa3f88d124f3049aa5 | 2024-01-21 12:26:45 | Weny Xu | feat: make procedure able to return output (#3201) | false |
index 3a110ea52315..7ba897877d85 100644
--- a/src/common/meta/src/ddl/alter_table.rs
+++ b/src/common/meta/src/ddl/alter_table.rs
@@ -333,7 +333,7 @@ impl AlterTableProcedure {
.await?;
};
- Ok(Status::Done)
+ Ok(Status::done())
}
fn lock_key_inner(&self) -> Vec<StringKey> {
diff --git a/src/common/meta/src/ddl/create_table.rs b/src/common/meta/src/ddl/create_table.rs
index 110ccbeb6534..1fe354d63d63 100644
--- a/src/common/meta/src/ddl/create_table.rs
+++ b/src/common/meta/src/ddl/create_table.rs
@@ -123,7 +123,7 @@ impl CreateTableProcedure {
}
);
- return Ok(Status::Done);
+ return Ok(Status::done());
}
self.creator.data.state = CreateTableState::DatanodeCreateRegions;
@@ -313,7 +313,7 @@ impl CreateTableProcedure {
.await?;
info!("Created table metadata for table {table_id}");
- Ok(Status::Done)
+ Ok(Status::done())
}
}
diff --git a/src/common/meta/src/ddl/drop_table.rs b/src/common/meta/src/ddl/drop_table.rs
index b1045abdea2c..ceb47193d7c7 100644
--- a/src/common/meta/src/ddl/drop_table.rs
+++ b/src/common/meta/src/ddl/drop_table.rs
@@ -100,7 +100,7 @@ impl DropTableProcedure {
.await?;
if !exist && self.data.task.drop_if_exists {
- return Ok(Status::Done);
+ return Ok(Status::done());
}
ensure!(
@@ -236,7 +236,7 @@ impl DropTableProcedure {
.into_iter()
.collect::<Result<Vec<_>>>()?;
- Ok(Status::Done)
+ Ok(Status::done())
}
}
diff --git a/src/common/meta/src/ddl/truncate_table.rs b/src/common/meta/src/ddl/truncate_table.rs
index 973c15196577..609feef26ffa 100644
--- a/src/common/meta/src/ddl/truncate_table.rs
+++ b/src/common/meta/src/ddl/truncate_table.rs
@@ -182,7 +182,7 @@ impl TruncateTableProcedure {
.into_iter()
.collect::<Result<Vec<_>>>()?;
- Ok(Status::Done)
+ Ok(Status::done())
}
}
diff --git a/src/common/procedure-test/src/lib.rs b/src/common/procedure-test/src/lib.rs
index 938a2ad91b84..fb759b16a2fc 100644
--- a/src/common/procedure-test/src/lib.rs
+++ b/src/common/procedure-test/src/lib.rs
@@ -60,7 +60,7 @@ pub async fn execute_procedure_until_done(procedure: &mut dyn Procedure) {
subprocedures.is_empty(),
"Executing subprocedure is unsupported"
),
- Status::Done => break,
+ Status::Done { .. } => break,
}
}
}
@@ -87,7 +87,7 @@ pub async fn execute_procedure_once(
);
false
}
- Status::Done => true,
+ Status::Done { .. } => true,
}
}
@@ -108,7 +108,7 @@ pub async fn execute_until_suspended_or_done(
match procedure.execute(&ctx).await.unwrap() {
Status::Executing { .. } => (),
Status::Suspended { subprocedures, .. } => return Some(subprocedures),
- Status::Done => break,
+ Status::Done { .. } => break,
}
}
diff --git a/src/common/procedure/src/local.rs b/src/common/procedure/src/local.rs
index 624e98d181a8..c68005db590d 100644
--- a/src/common/procedure/src/local.rs
+++ b/src/common/procedure/src/local.rs
@@ -662,7 +662,7 @@ mod tests {
assert!(ctx.contains_procedure(meta.id));
assert!(ctx.state(meta.id).unwrap().is_running());
- meta.set_state(ProcedureState::Done);
+ meta.set_state(ProcedureState::Done { output: None });
assert!(ctx.state(meta.id).unwrap().is_done());
}
@@ -723,7 +723,7 @@ mod tests {
}
async fn execute(&mut self, _ctx: &Context) -> Result<Status> {
- Ok(Status::Done)
+ Ok(Status::done())
}
fn dump(&self) -> Result<String> {
diff --git a/src/common/procedure/src/local/runner.rs b/src/common/procedure/src/local/runner.rs
index 87f2e2f635b1..ef4e018de268 100644
--- a/src/common/procedure/src/local/runner.rs
+++ b/src/common/procedure/src/local/runner.rs
@@ -22,7 +22,7 @@ use tokio::time;
use super::rwlock::OwnedKeyRwLockGuard;
use crate::error::{self, ProcedurePanicSnafu, Result};
use crate::local::{ManagerContext, ProcedureMeta, ProcedureMetaRef};
-use crate::procedure::StringKey;
+use crate::procedure::{Output, StringKey};
use crate::store::ProcedureStore;
use crate::ProcedureState::Retrying;
use crate::{BoxedProcedure, Context, Error, ProcedureId, ProcedureState, ProcedureWithId, Status};
@@ -288,13 +288,13 @@ impl Runner {
Status::Suspended { subprocedures, .. } => {
self.on_suspended(subprocedures).await;
}
- Status::Done => {
+ Status::Done { output } => {
if let Err(e) = self.commit_procedure().await {
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return ExecResult::RetryLater;
}
- self.done();
+ self.done(output);
return ExecResult::Done;
}
}
@@ -481,7 +481,7 @@ impl Runner {
Ok(())
}
- fn done(&self) {
+ fn done(&self, output: Option<Output>) {
// TODO(yingwen): Add files to remove list.
logging::info!(
"Procedure {}-{} done",
@@ -490,7 +490,7 @@ impl Runner {
);
// Mark the state of this procedure to done.
- self.meta.set_state(ProcedureState::Done);
+ self.meta.set_state(ProcedureState::Done { output });
}
}
@@ -610,7 +610,7 @@ mod tests {
if times == 1 {
Ok(Status::Executing { persist })
} else {
- Ok(Status::Done)
+ Ok(Status::done())
}
}
.boxed()
@@ -703,7 +703,7 @@ mod tests {
time::sleep(Duration::from_millis(200)).await;
Ok(Status::Executing { persist: true })
} else {
- Ok(Status::Done)
+ Ok(Status::done())
}
}
.boxed()
@@ -764,7 +764,7 @@ mod tests {
}
}
if all_child_done {
- Ok(Status::Done)
+ Ok(Status::done())
} else {
// Return suspended to wait for notify.
Ok(Status::Suspended {
@@ -923,7 +923,7 @@ mod tests {
if times == 1 {
Err(Error::retry_later(MockError::new(StatusCode::Unexpected)))
} else {
- Ok(Status::Done)
+ Ok(Status::done())
}
}
.boxed()
diff --git a/src/common/procedure/src/procedure.rs b/src/common/procedure/src/procedure.rs
index 2df005bdf042..a60d935c3e25 100644
--- a/src/common/procedure/src/procedure.rs
+++ b/src/common/procedure/src/procedure.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::any::Any;
use std::fmt;
use std::str::FromStr;
use std::sync::Arc;
@@ -25,6 +26,8 @@ use uuid::Uuid;
use crate::error::{Error, Result};
use crate::watcher::Watcher;
+pub type Output = Arc<dyn Any + Send + Sync>;
+
/// Procedure execution status.
#[derive(Debug)]
pub enum Status {
@@ -40,7 +43,7 @@ pub enum Status {
persist: bool,
},
/// the procedure is done.
- Done,
+ Done { output: Option<Output> },
}
impl Status {
@@ -49,13 +52,29 @@ impl Status {
Status::Executing { persist }
}
+ /// Returns a [Status::Done] without output.
+ pub fn done() -> Status {
+ Status::Done { output: None }
+ }
+
+ /// Returns a [Status::Done] with output.
+ pub fn done_with_output(output: Output) -> Status {
+ Status::Done {
+ output: Some(output),
+ }
+ }
+ /// Returns `true` if the procedure is done.
+ pub fn is_done(&self) -> bool {
+ matches!(self, Status::Done { .. })
+ }
+
/// Returns `true` if the procedure needs the framework to persist its intermediate state.
pub fn need_persist(&self) -> bool {
// If the procedure is done, the framework doesn't need to persist the procedure
// anymore. It only needs to mark the procedure as committed.
match self {
Status::Executing { persist } | Status::Suspended { persist, .. } => *persist,
- Status::Done => false,
+ Status::Done { .. } => false,
}
}
}
@@ -251,7 +270,7 @@ pub enum ProcedureState {
#[default]
Running,
/// The procedure is finished.
- Done,
+ Done { output: Option<Output> },
/// The procedure is failed and can be retried.
Retrying { error: Arc<Error> },
/// The procedure is failed and cannot proceed anymore.
@@ -276,7 +295,7 @@ impl ProcedureState {
/// Returns true if the procedure state is done.
pub fn is_done(&self) -> bool {
- matches!(self, ProcedureState::Done)
+ matches!(self, ProcedureState::Done { .. })
}
/// Returns true if the procedure state failed.
@@ -360,7 +379,7 @@ mod tests {
};
assert!(status.need_persist());
- let status = Status::Done;
+ let status = Status::done();
assert!(!status.need_persist());
}
@@ -415,7 +434,7 @@ mod tests {
fn test_procedure_state() {
assert!(ProcedureState::Running.is_running());
assert!(ProcedureState::Running.error().is_none());
- assert!(ProcedureState::Done.is_done());
+ assert!(ProcedureState::Done { output: None }.is_done());
let state = ProcedureState::failed(Arc::new(Error::external(MockError::new(
StatusCode::Unexpected,
diff --git a/src/common/procedure/src/watcher.rs b/src/common/procedure/src/watcher.rs
index 584aae520df7..93aa91d5bc35 100644
--- a/src/common/procedure/src/watcher.rs
+++ b/src/common/procedure/src/watcher.rs
@@ -17,19 +17,19 @@ use snafu::ResultExt;
use tokio::sync::watch::Receiver;
use crate::error::{ProcedureExecSnafu, Result, WaitWatcherSnafu};
-use crate::procedure::ProcedureState;
+use crate::procedure::{Output, ProcedureState};
/// Watcher to watch procedure state.
pub type Watcher = Receiver<ProcedureState>;
/// Wait the [Watcher] until the [ProcedureState] is done.
-pub async fn wait(watcher: &mut Watcher) -> Result<()> {
+pub async fn wait(watcher: &mut Watcher) -> Result<Option<Output>> {
loop {
watcher.changed().await.context(WaitWatcherSnafu)?;
match &*watcher.borrow() {
ProcedureState::Running => (),
- ProcedureState::Done => {
- return Ok(());
+ ProcedureState::Done { output } => {
+ return Ok(output.clone());
}
ProcedureState::Failed { error } => {
return Err(error.clone()).context(ProcedureExecSnafu);
@@ -89,7 +89,7 @@ mod tests {
self.error = !self.error;
Err(Error::retry_later(MockError::new(StatusCode::Internal)))
} else {
- Ok(Status::Done)
+ Ok(Status::done_with_output(Arc::new("hello")))
}
}
@@ -111,6 +111,8 @@ mod tests {
.await
.unwrap();
- wait(&mut watcher).await.unwrap();
+ let output = wait(&mut watcher).await.unwrap().unwrap();
+ let output = output.downcast::<&str>().unwrap();
+ assert_eq!(output.as_ref(), &"hello");
}
}
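Taken together, these changes let a procedure attach an arbitrary Arc<dyn Any + Send + Sync> value to its terminal state and let the waiter recover it. A minimal caller-side sketch, assuming a hypothetical submit_procedure helper that returns a Watcher (only wait, Status::done_with_output, and the downcast come from this change):

    // Inside an async fn returning the procedure crate's Result.
    // submit_procedure is a stand-in for whatever submits the procedure and
    // hands back a Watcher; it is not part of this change.
    let mut watcher = submit_procedure(procedure_with_id).await?;
    // wait now yields the optional output set via Status::done_with_output.
    if let Some(output) = wait(&mut watcher).await? {
        // The output is type-erased, so the caller downcasts to the expected type.
        let value = output.downcast::<&str>().expect("unexpected output type");
        println!("procedure finished with {value}");
    }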
diff --git a/src/meta-srv/src/procedure/region_failover/failover_end.rs b/src/meta-srv/src/procedure/region_failover/failover_end.rs
index fa299ee46422..48d0a1fa1826 100644
--- a/src/meta-srv/src/procedure/region_failover/failover_end.rs
+++ b/src/meta-srv/src/procedure/region_failover/failover_end.rs
@@ -31,6 +31,6 @@ impl State for RegionFailoverEnd {
}
fn status(&self) -> Status {
- Status::Done
+ Status::done()
}
}
diff --git a/src/meta-srv/src/procedure/region_migration.rs b/src/meta-srv/src/procedure/region_migration.rs
index d1c73597b79a..77e1493cfc7e 100644
--- a/src/meta-srv/src/procedure/region_migration.rs
+++ b/src/meta-srv/src/procedure/region_migration.rs
@@ -511,7 +511,7 @@ mod tests {
let pc = &mut ctx.persistent_ctx;
if pc.cluster_id == 2 {
- Ok((Box::new(RegionMigrationEnd), Status::Done))
+ Ok((Box::new(RegionMigrationEnd), Status::done()))
} else {
pc.cluster_id += 1;
Ok((Box::new(MockState), Status::executing(false)))
@@ -540,7 +540,7 @@ mod tests {
for _ in 0..3 {
status = Some(procedure.execute(&ctx).await.unwrap());
}
- assert_matches!(status.unwrap(), Status::Done);
+ assert!(status.unwrap().is_done());
let ctx = TestingEnv::procedure_context();
let mut procedure = new_mock_procedure(&env);
@@ -557,7 +557,7 @@ mod tests {
status = Some(procedure.execute(&ctx).await.unwrap());
}
assert_eq!(procedure.context.persistent_ctx.cluster_id, 2);
- assert_matches!(status.unwrap(), Status::Done);
+ assert!(status.unwrap().is_done());
}
#[tokio::test]
diff --git a/src/meta-srv/src/procedure/region_migration/migration_end.rs b/src/meta-srv/src/procedure/region_migration/migration_end.rs
index dd7efdc92bb7..0aebbd719c37 100644
--- a/src/meta-srv/src/procedure/region_migration/migration_end.rs
+++ b/src/meta-srv/src/procedure/region_migration/migration_end.rs
@@ -27,7 +27,7 @@ pub struct RegionMigrationEnd;
#[typetag::serde]
impl State for RegionMigrationEnd {
async fn next(&mut self, _: &mut Context) -> Result<(Box<dyn State>, Status)> {
- Ok((Box::new(RegionMigrationEnd), Status::Done))
+ Ok((Box::new(RegionMigrationEnd), Status::done()))
}
fn as_any(&self) -> &dyn Any {
diff --git a/src/meta-srv/src/procedure/region_migration/migration_start.rs b/src/meta-srv/src/procedure/region_migration/migration_start.rs
index 048a1a4cbb04..3f8103341029 100644
--- a/src/meta-srv/src/procedure/region_migration/migration_start.rs
+++ b/src/meta-srv/src/procedure/region_migration/migration_start.rs
@@ -55,14 +55,14 @@ impl State for RegionMigrationStart {
let from_peer = &ctx.persistent_ctx.from_peer;
if self.has_migrated(&region_route, to_peer)? {
- Ok((Box::new(RegionMigrationEnd), Status::Done))
+ Ok((Box::new(RegionMigrationEnd), Status::done()))
} else if self.invalid_leader_peer(&region_route, from_peer)? {
Ok((
Box::new(RegionMigrationAbort::new(&format!(
"Invalid region leader peer: {from_peer:?}, expected: {:?}",
region_route.leader_peer.as_ref().unwrap(),
))),
- Status::Done,
+ Status::done(),
))
} else if self.check_candidate_region_on_peer(&region_route, to_peer) {
Ok((Box::new(UpdateMetadata::Downgrade), Status::executing(true)))
diff --git a/src/meta-srv/src/procedure/region_migration/test_util.rs b/src/meta-srv/src/procedure/region_migration/test_util.rs
index c311977838dd..c09d18c8965f 100644
--- a/src/meta-srv/src/procedure/region_migration/test_util.rs
+++ b/src/meta-srv/src/procedure/region_migration/test_util.rs
@@ -520,7 +520,7 @@ pub(crate) fn assert_no_persist(status: Status) {
/// Asserts the [Status] should be [Status::Done].
pub(crate) fn assert_done(status: Status) {
- assert_matches!(status, Status::Done)
+ assert!(status.is_done());
}
/// Asserts the [State] should be [OpenCandidateRegion].
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata.rs b/src/meta-srv/src/procedure/region_migration/update_metadata.rs
index 90b60621a407..180cf31fe1c4 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata.rs
@@ -58,7 +58,7 @@ impl State for UpdateMetadata {
if let Err(err) = ctx.invalidate_table_cache().await {
warn!("Failed to broadcast the invalidate table cache message during the upgrade candidate, error: {err:?}");
};
- Ok((Box::new(RegionMigrationEnd), Status::Done))
+ Ok((Box::new(RegionMigrationEnd), Status::done()))
}
UpdateMetadata::Rollback => {
self.rollback_downgraded_region(ctx).await?;
diff --git a/src/meta-srv/src/procedure/tests.rs b/src/meta-srv/src/procedure/tests.rs
index d042cdc37378..8c69faf2d2e1 100644
--- a/src/meta-srv/src/procedure/tests.rs
+++ b/src/meta-srv/src/procedure/tests.rs
@@ -275,7 +275,7 @@ async fn test_on_datanode_drop_regions() {
});
let status = procedure.on_datanode_drop_regions().await.unwrap();
- assert!(matches!(status, Status::Done));
+ assert!(status.is_done());
handle.await.unwrap();
| feat | make procedure able to return output (#3201) |
| c0aed1d267f7f1d638bedc68456ed7ecc794c6bf | 2024-06-04 15:33:33 | LFC | feat: set global runtime size by config file (#4063) | false |
diff --git a/.github/actions/setup-greptimedb-cluster/action.yml b/.github/actions/setup-greptimedb-cluster/action.yml
index 93d8c569c95d..eaf0032c7715 100644
--- a/.github/actions/setup-greptimedb-cluster/action.yml
+++ b/.github/actions/setup-greptimedb-cluster/action.yml
@@ -57,6 +57,7 @@ runs:
greptime/greptimedb-cluster \
--create-namespace \
-n my-greptimedb \
+ --values ./.github/actions/setup-greptimedb-cluster/values.yaml \
--wait \
--wait-for-jobs
- name: Wait for GreptimeDB
diff --git a/.github/actions/setup-greptimedb-cluster/values.yaml b/.github/actions/setup-greptimedb-cluster/values.yaml
new file mode 100644
index 000000000000..b7ac1eb86e17
--- /dev/null
+++ b/.github/actions/setup-greptimedb-cluster/values.yaml
@@ -0,0 +1,18 @@
+meta:
+ config: |-
+ [runtime]
+ read_rt_size = 8
+ write_rt_size = 8
+ bg_rt_size = 8
+datanode:
+ config: |-
+ [runtime]
+ read_rt_size = 8
+ write_rt_size = 8
+ bg_rt_size = 8
+frontend:
+ config: |-
+ [runtime]
+ read_rt_size = 8
+ write_rt_size = 8
+ bg_rt_size = 8
\ No newline at end of file
diff --git a/Cargo.lock b/Cargo.lock
index 44ad55172287..306bccd24f43 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2080,9 +2080,11 @@ dependencies = [
"common-macro",
"common-telemetry",
"lazy_static",
+ "num_cpus",
"once_cell",
"paste",
"prometheus",
+ "serde",
"snafu 0.8.3",
"tokio",
"tokio-metrics",
@@ -11049,8 +11051,7 @@ dependencies = [
[[package]]
name = "tokio-metrics-collector"
version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d767da47381602cc481653456823b3ebb600e83d5dd4e0293da9b5566c6c00f0"
+source = "git+https://github.com/MichaelScofield/tokio-metrics-collector.git?rev=89d692d5753d28564a7aac73c6ac5aba22243ba0#89d692d5753d28564a7aac73c6ac5aba22243ba0"
dependencies = [
"lazy_static",
"parking_lot 0.12.3",
diff --git a/config/config.md b/config/config.md
index 912e8ca7508c..5c4878c9d37d 100644
--- a/config/config.md
+++ b/config/config.md
@@ -13,6 +13,10 @@
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | `None` | The default timezone of the server. |
+| `runtime` | -- | -- | The runtime options. |
+| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
+| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
+| `runtime.bg_rt_size` | Integer | `8` | The number of threads to execute the runtime for global background operations. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. |
@@ -154,6 +158,10 @@
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `default_timezone` | String | `None` | The default timezone of the server. |
+| `runtime` | -- | -- | The runtime options. |
+| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
+| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
+| `runtime.bg_rt_size` | Integer | `8` | The number of threads to execute the runtime for global background operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
@@ -240,6 +248,10 @@
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
+| `runtime` | -- | -- | The runtime options. |
+| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
+| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
+| `runtime.bg_rt_size` | Integer | `8` | The number of threads to execute the runtime for global background operations. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
@@ -300,6 +312,10 @@
| `rpc_max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `rpc_max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
+| `runtime` | -- | -- | The runtime options. |
+| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
+| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
+| `runtime.bg_rt_size` | Integer | `8` | The number of threads to execute the runtime for global background operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index d1849048778c..3a20d3ac5f16 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -32,6 +32,15 @@ rpc_max_send_message_size = "512MB"
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
+## The runtime options.
+[runtime]
+## The number of threads to execute the runtime for global read operations.
+read_rt_size = 8
+## The number of threads to execute the runtime for global write operations.
+write_rt_size = 8
+## The number of threads to execute the runtime for global background operations.
+bg_rt_size = 8
+
## The heartbeat options.
[heartbeat]
## Interval for sending heartbeat messages to the metasrv.
diff --git a/config/frontend.example.toml b/config/frontend.example.toml
index 728a3099f837..4f4bd5bf3d3d 100644
--- a/config/frontend.example.toml
+++ b/config/frontend.example.toml
@@ -5,6 +5,15 @@ mode = "standalone"
## +toml2docs:none-default
default_timezone = "UTC"
+## The runtime options.
+[runtime]
+## The number of threads to execute the runtime for global read operations.
+read_rt_size = 8
+## The number of threads to execute the runtime for global write operations.
+write_rt_size = 8
+## The number of threads to execute the runtime for global background operations.
+bg_rt_size = 8
+
## The heartbeat options.
[heartbeat]
## Interval for sending heartbeat messages to the metasrv.
diff --git a/config/metasrv.example.toml b/config/metasrv.example.toml
index bc6a5d119342..239533bd5886 100644
--- a/config/metasrv.example.toml
+++ b/config/metasrv.example.toml
@@ -25,6 +25,15 @@ enable_telemetry = true
## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
+## The runtime options.
+[runtime]
+## The number of threads to execute the runtime for global read operations.
+read_rt_size = 8
+## The number of threads to execute the runtime for global write operations.
+write_rt_size = 8
+## The number of threads to execute the runtime for global background operations.
+bg_rt_size = 8
+
## Procedure storage options.
[procedure]
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index 8386c7e1e61a..d6fcc3e8943e 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -8,6 +8,15 @@ enable_telemetry = true
## +toml2docs:none-default
default_timezone = "UTC"
+## The runtime options.
+[runtime]
+## The number of threads to execute the runtime for global read operations.
+read_rt_size = 8
+## The number of threads to execute the runtime for global write operations.
+write_rt_size = 8
+## The number of threads to execute the runtime for global background operations.
+bg_rt_size = 8
+
## The HTTP server options.
[http]
## The address to bind the HTTP server.
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index 3c189f2c3d07..d8680ed5294e 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -23,7 +23,6 @@ use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_version::{short_version, version};
use common_wal::config::DatanodeWalConfig;
-use datanode::config::DatanodeOptions;
use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::service::DatanodeServiceBuilder;
use meta_client::MetaClientOptions;
@@ -34,11 +33,13 @@ use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{
LoadLayeredConfigSnafu, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu,
};
-use crate::options::GlobalOptions;
+use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{log_versions, App};
pub const APP_NAME: &str = "greptime-datanode";
+type DatanodeOptions = GreptimeOptions<datanode::config::DatanodeOptions>;
+
pub struct Instance {
datanode: Datanode,
@@ -97,7 +98,9 @@ impl Command {
}
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
- self.subcmd.load_options(global_options)
+ match &self.subcmd {
+ SubCommand::Start(cmd) => cmd.load_options(global_options),
+ }
}
}
@@ -112,12 +115,6 @@ impl SubCommand {
SubCommand::Start(cmd) => cmd.build(opts).await,
}
}
-
- fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
- match self {
- SubCommand::Start(cmd) => cmd.load_options(global_options),
- }
- }
}
#[derive(Debug, Parser, Default)]
@@ -146,22 +143,25 @@ struct StartCommand {
impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
- self.merge_with_cli_options(
- global_options,
- DatanodeOptions::load_layered_options(
- self.config_file.as_deref(),
- self.env_prefix.as_ref(),
- )
- .context(LoadLayeredConfigSnafu)?,
+ let mut opts = DatanodeOptions::load_layered_options(
+ self.config_file.as_deref(),
+ self.env_prefix.as_ref(),
)
+ .context(LoadLayeredConfigSnafu)?;
+
+ self.merge_with_cli_options(global_options, &mut opts)?;
+
+ Ok(opts)
}
// The precedence order is: cli > config file > environment variables > default values.
fn merge_with_cli_options(
&self,
global_options: &GlobalOptions,
- mut opts: DatanodeOptions,
- ) -> Result<DatanodeOptions> {
+ opts: &mut DatanodeOptions,
+ ) -> Result<()> {
+ let opts = &mut opts.component;
+
if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
@@ -231,25 +231,28 @@ impl StartCommand {
// Disable dashboard in datanode.
opts.http.disable_dashboard = true;
- Ok(opts)
+ Ok(())
}
- async fn build(&self, mut opts: DatanodeOptions) -> Result<Instance> {
+ async fn build(&self, opts: DatanodeOptions) -> Result<Instance> {
+ common_runtime::init_global_runtimes(&opts.runtime);
+
let guard = common_telemetry::init_global_logging(
APP_NAME,
- &opts.logging,
- &opts.tracing,
- opts.node_id.map(|x| x.to_string()),
+ &opts.component.logging,
+ &opts.component.tracing,
+ opts.component.node_id.map(|x| x.to_string()),
);
log_versions(version!(), short_version!());
+ info!("Datanode start command: {:#?}", self);
+ info!("Datanode options: {:#?}", opts);
+
+ let mut opts = opts.component;
let plugins = plugins::setup_datanode_plugins(&mut opts)
.await
.context(StartDatanodeSnafu)?;
- info!("Datanode start command: {:#?}", self);
- info!("Datanode options: {:#?}", opts);
-
let node_id = opts
.node_id
.context(MissingConfigSnafu { msg: "'node_id'" })?;
@@ -353,7 +356,7 @@ mod tests {
..Default::default()
};
- let options = cmd.load_options(&GlobalOptions::default()).unwrap();
+ let options = cmd.load_options(&Default::default()).unwrap().component;
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!(Some(42), options.node_id);
@@ -414,7 +417,8 @@ mod tests {
fn test_try_from_cmd() {
let opt = StartCommand::default()
.load_options(&GlobalOptions::default())
- .unwrap();
+ .unwrap()
+ .component;
assert_eq!(Mode::Standalone, opt.mode);
let opt = (StartCommand {
@@ -423,7 +427,8 @@ mod tests {
..Default::default()
})
.load_options(&GlobalOptions::default())
- .unwrap();
+ .unwrap()
+ .component;
assert_eq!(Mode::Distributed, opt.mode);
assert!((StartCommand {
@@ -454,7 +459,8 @@ mod tests {
#[cfg(feature = "tokio-console")]
tokio_console_addr: None,
})
- .unwrap();
+ .unwrap()
+ .component;
let logging_opt = options.logging;
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
@@ -536,7 +542,7 @@ mod tests {
..Default::default()
};
- let opts = command.load_options(&GlobalOptions::default()).unwrap();
+ let opts = command.load_options(&Default::default()).unwrap().component;
// Should be read from env, env > default values.
let DatanodeWalConfig::RaftEngine(raft_engine_config) = opts.wal else {
@@ -562,7 +568,10 @@ mod tests {
assert_eq!(raft_engine_config.dir.unwrap(), "/other/wal/dir");
// Should be default value.
- assert_eq!(opts.http.addr, DatanodeOptions::default().http.addr);
+ assert_eq!(
+ opts.http.addr,
+ DatanodeOptions::default().component.http.addr
+ );
},
);
}
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index a3e744e9c7ec..a7781e37a2ed 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -29,7 +29,6 @@ use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_time::timezone::set_default_timezone;
use common_version::{short_version, version};
-use frontend::frontend::FrontendOptions;
use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
use frontend::heartbeat::HeartbeatTask;
use frontend::instance::builder::FrontendBuilder;
@@ -44,9 +43,11 @@ use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{
self, InitTimezoneSnafu, LoadLayeredConfigSnafu, MissingConfigSnafu, Result, StartFrontendSnafu,
};
-use crate::options::GlobalOptions;
+use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{log_versions, App};
+type FrontendOptions = GreptimeOptions<frontend::frontend::FrontendOptions>;
+
pub struct Instance {
frontend: FeInstance,
@@ -164,22 +165,25 @@ pub struct StartCommand {
impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<FrontendOptions> {
- self.merge_with_cli_options(
- global_options,
- FrontendOptions::load_layered_options(
- self.config_file.as_deref(),
- self.env_prefix.as_ref(),
- )
- .context(LoadLayeredConfigSnafu)?,
+ let mut opts = FrontendOptions::load_layered_options(
+ self.config_file.as_deref(),
+ self.env_prefix.as_ref(),
)
+ .context(LoadLayeredConfigSnafu)?;
+
+ self.merge_with_cli_options(global_options, &mut opts)?;
+
+ Ok(opts)
}
// The precedence order is: cli > config file > environment variables > default values.
fn merge_with_cli_options(
&self,
global_options: &GlobalOptions,
- mut opts: FrontendOptions,
- ) -> Result<FrontendOptions> {
+ opts: &mut FrontendOptions,
+ ) -> Result<()> {
+ let opts = &mut opts.component;
+
if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
@@ -242,26 +246,29 @@ impl StartCommand {
opts.user_provider.clone_from(&self.user_provider);
- Ok(opts)
+ Ok(())
}
- async fn build(&self, mut opts: FrontendOptions) -> Result<Instance> {
+ async fn build(&self, opts: FrontendOptions) -> Result<Instance> {
+ common_runtime::init_global_runtimes(&opts.runtime);
+
let guard = common_telemetry::init_global_logging(
APP_NAME,
- &opts.logging,
- &opts.tracing,
- opts.node_id.clone(),
+ &opts.component.logging,
+ &opts.component.tracing,
+ opts.component.node_id.clone(),
);
log_versions(version!(), short_version!());
+ info!("Frontend start command: {:#?}", self);
+ info!("Frontend options: {:#?}", opts);
+
+ let mut opts = opts.component;
#[allow(clippy::unnecessary_mut_passed)]
let plugins = plugins::setup_frontend_plugins(&mut opts)
.await
.context(StartFrontendSnafu)?;
- info!("Frontend start command: {:#?}", self);
- info!("Frontend options: {:#?}", opts);
-
set_default_timezone(opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?;
let meta_client_options = opts.meta_client.as_ref().context(MissingConfigSnafu {
@@ -380,14 +387,14 @@ mod tests {
..Default::default()
};
- let opts = command.load_options(&GlobalOptions::default()).unwrap();
+ let opts = command.load_options(&Default::default()).unwrap().component;
assert_eq!(opts.http.addr, "127.0.0.1:1234");
assert_eq!(ReadableSize::mb(64), opts.http.body_limit);
assert_eq!(opts.mysql.addr, "127.0.0.1:5678");
assert_eq!(opts.postgres.addr, "127.0.0.1:5432");
- let default_opts = FrontendOptions::default();
+ let default_opts = FrontendOptions::default().component;
assert_eq!(opts.grpc.addr, default_opts.grpc.addr);
assert!(opts.mysql.enable);
@@ -428,7 +435,8 @@ mod tests {
..Default::default()
};
- let fe_opts = command.load_options(&GlobalOptions::default()).unwrap();
+ let fe_opts = command.load_options(&Default::default()).unwrap().component;
+
assert_eq!(Mode::Distributed, fe_opts.mode);
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
@@ -442,7 +450,7 @@ mod tests {
#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
- let mut fe_opts = FrontendOptions {
+ let mut fe_opts = frontend::frontend::FrontendOptions {
http: HttpOptions {
disable_dashboard: false,
..Default::default()
@@ -479,7 +487,8 @@ mod tests {
#[cfg(feature = "tokio-console")]
tokio_console_addr: None,
})
- .unwrap();
+ .unwrap()
+ .component;
let logging_opt = options.logging;
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
@@ -557,7 +566,7 @@ mod tests {
..Default::default()
};
- let fe_opts = command.load_options(&GlobalOptions::default()).unwrap();
+ let fe_opts = command.load_options(&Default::default()).unwrap().component;
// Should be read from env, env > default values.
assert_eq!(fe_opts.mysql.runtime_size, 11);
diff --git a/src/cmd/src/metasrv.rs b/src/cmd/src/metasrv.rs
index 8648f220f3ac..3b89fdce112e 100644
--- a/src/cmd/src/metasrv.rs
+++ b/src/cmd/src/metasrv.rs
@@ -21,14 +21,15 @@ use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
use common_version::{short_version, version};
use meta_srv::bootstrap::MetasrvInstance;
-use meta_srv::metasrv::MetasrvOptions;
use snafu::ResultExt;
use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{self, LoadLayeredConfigSnafu, Result, StartMetaServerSnafu};
-use crate::options::GlobalOptions;
+use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{log_versions, App};
+type MetasrvOptions = GreptimeOptions<meta_srv::metasrv::MetasrvOptions>;
+
pub const APP_NAME: &str = "greptime-metasrv";
pub struct Instance {
@@ -139,22 +140,25 @@ struct StartCommand {
impl StartCommand {
fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
- self.merge_with_cli_options(
- global_options,
- MetasrvOptions::load_layered_options(
- self.config_file.as_deref(),
- self.env_prefix.as_ref(),
- )
- .context(LoadLayeredConfigSnafu)?,
+ let mut opts = MetasrvOptions::load_layered_options(
+ self.config_file.as_deref(),
+ self.env_prefix.as_ref(),
)
+ .context(LoadLayeredConfigSnafu)?;
+
+ self.merge_with_cli_options(global_options, &mut opts)?;
+
+ Ok(opts)
}
// The precedence order is: cli > config file > environment variables > default values.
fn merge_with_cli_options(
&self,
global_options: &GlobalOptions,
- mut opts: MetasrvOptions,
- ) -> Result<MetasrvOptions> {
+ opts: &mut MetasrvOptions,
+ ) -> Result<()> {
+ let opts = &mut opts.component;
+
if let Some(dir) = &global_options.log_dir {
opts.logging.dir.clone_from(dir);
}
@@ -217,21 +221,28 @@ impl StartCommand {
// Disable dashboard in metasrv.
opts.http.disable_dashboard = true;
- Ok(opts)
+ Ok(())
}
- async fn build(&self, mut opts: MetasrvOptions) -> Result<Instance> {
- let guard =
- common_telemetry::init_global_logging(APP_NAME, &opts.logging, &opts.tracing, None);
+ async fn build(&self, opts: MetasrvOptions) -> Result<Instance> {
+ common_runtime::init_global_runtimes(&opts.runtime);
+
+ let guard = common_telemetry::init_global_logging(
+ APP_NAME,
+ &opts.component.logging,
+ &opts.component.tracing,
+ None,
+ );
log_versions(version!(), short_version!());
+ info!("Metasrv start command: {:#?}", self);
+ info!("Metasrv options: {:#?}", opts);
+
+ let mut opts = opts.component;
let plugins = plugins::setup_metasrv_plugins(&mut opts)
.await
.context(StartMetaServerSnafu)?;
- info!("Metasrv start command: {:#?}", self);
- info!("Metasrv options: {:#?}", opts);
-
let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None)
.await
.context(error::BuildMetaServerSnafu)?;
@@ -266,7 +277,7 @@ mod tests {
..Default::default()
};
- let options = cmd.load_options(&GlobalOptions::default()).unwrap();
+ let options = cmd.load_options(&Default::default()).unwrap().component;
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!(vec!["127.0.0.1:2380".to_string()], options.store_addrs);
assert_eq!(SelectorType::LoadBased, options.selector);
@@ -299,7 +310,7 @@ mod tests {
..Default::default()
};
- let options = cmd.load_options(&GlobalOptions::default()).unwrap();
+ let options = cmd.load_options(&Default::default()).unwrap().component;
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
assert_eq!(vec!["127.0.0.1:2379".to_string()], options.store_addrs);
@@ -349,7 +360,8 @@ mod tests {
#[cfg(feature = "tokio-console")]
tokio_console_addr: None,
})
- .unwrap();
+ .unwrap()
+ .component;
let logging_opt = options.logging;
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
@@ -406,7 +418,7 @@ mod tests {
..Default::default()
};
- let opts = command.load_options(&GlobalOptions::default()).unwrap();
+ let opts = command.load_options(&Default::default()).unwrap().component;
// Should be read from env, env > default values.
assert_eq!(opts.bind_addr, "127.0.0.1:14002");
diff --git a/src/cmd/src/options.rs b/src/cmd/src/options.rs
index 03ccbc536247..26ac9203a225 100644
--- a/src/cmd/src/options.rs
+++ b/src/cmd/src/options.rs
@@ -13,6 +13,9 @@
// limitations under the License.
use clap::Parser;
+use common_config::Configurable;
+use common_runtime::global::RuntimeOptions;
+use serde::{Deserialize, Serialize};
#[derive(Parser, Default, Debug, Clone)]
pub struct GlobalOptions {
@@ -29,3 +32,22 @@ pub struct GlobalOptions {
#[arg(global = true)]
pub tokio_console_addr: Option<String>,
}
+
+// TODO(LFC): Move logging and tracing options into global options, like the runtime options.
+/// All the options of GreptimeDB.
+#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
+#[serde(default)]
+pub struct GreptimeOptions<T> {
+ /// The runtime options.
+ pub runtime: RuntimeOptions,
+
+ /// The options of each component (like Datanode or Standalone) of GreptimeDB.
+ #[serde(flatten)]
+ pub component: T,
+}
+
+impl<T: Configurable> Configurable for GreptimeOptions<T> {
+ fn env_list_keys() -> Option<&'static [&'static str]> {
+ T::env_list_keys()
+ }
+}
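The new GreptimeOptions<T> wrapper separates the global [runtime] section from the per-component options, which stay flattened at the top level of the same config file. A small loading sketch, assuming the standalone example file path used elsewhere in this change (the relative path and empty env prefix are illustrative only):

    use cmd::options::GreptimeOptions;
    use cmd::standalone::StandaloneOptions;
    use common_config::Configurable;

    // Layered load: config file > environment variables > default values.
    let opts = GreptimeOptions::<StandaloneOptions>::load_layered_options(
        Some("config/standalone.example.toml"), // illustrative path
        "",                                     // env prefix left empty here
    )
    .unwrap();
    // Keys under [runtime] populate opts.runtime; everything else fills the
    // flattened component options.
    assert_eq!(opts.runtime.read_rt_size, 8); // 8 is what the example file sets
    let standalone = opts.component;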
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 90958baf1048..e1ac35c98b06 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -67,7 +67,7 @@ use crate::error::{
ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
};
-use crate::options::GlobalOptions;
+use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{log_versions, App};
pub const APP_NAME: &str = "greptime-standalone";
@@ -79,11 +79,14 @@ pub struct Command {
}
impl Command {
- pub async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
+ pub async fn build(&self, opts: GreptimeOptions<StandaloneOptions>) -> Result<Instance> {
self.subcmd.build(opts).await
}
- pub fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
+ pub fn load_options(
+ &self,
+ global_options: &GlobalOptions,
+ ) -> Result<GreptimeOptions<StandaloneOptions>> {
self.subcmd.load_options(global_options)
}
}
@@ -94,20 +97,23 @@ enum SubCommand {
}
impl SubCommand {
- async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
+ async fn build(&self, opts: GreptimeOptions<StandaloneOptions>) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.build(opts).await,
}
}
- fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
+ fn load_options(
+ &self,
+ global_options: &GlobalOptions,
+ ) -> Result<GreptimeOptions<StandaloneOptions>> {
match self {
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
}
}
-#[derive(Clone, Debug, Serialize, Deserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct StandaloneOptions {
pub mode: Mode,
@@ -161,7 +167,7 @@ impl Default for StandaloneOptions {
}
}
-impl Configurable<'_> for StandaloneOptions {
+impl Configurable for StandaloneOptions {
fn env_list_keys() -> Option<&'static [&'static str]> {
Some(&["wal.broker_endpoints"])
}
@@ -291,23 +297,27 @@ pub struct StartCommand {
}
impl StartCommand {
- fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
- self.merge_with_cli_options(
- global_options,
- StandaloneOptions::load_layered_options(
- self.config_file.as_deref(),
- self.env_prefix.as_ref(),
- )
- .context(LoadLayeredConfigSnafu)?,
+ fn load_options(
+ &self,
+ global_options: &GlobalOptions,
+ ) -> Result<GreptimeOptions<StandaloneOptions>> {
+ let mut opts = GreptimeOptions::<StandaloneOptions>::load_layered_options(
+ self.config_file.as_deref(),
+ self.env_prefix.as_ref(),
)
+ .context(LoadLayeredConfigSnafu)?;
+
+ self.merge_with_cli_options(global_options, &mut opts.component)?;
+
+ Ok(opts)
}
// The precedence order is: cli > config file > environment variables > default values.
pub fn merge_with_cli_options(
&self,
global_options: &GlobalOptions,
- mut opts: StandaloneOptions,
- ) -> Result<StandaloneOptions> {
+ opts: &mut StandaloneOptions,
+ ) -> Result<()> {
// Should always be standalone mode.
opts.mode = Mode::Standalone;
@@ -369,20 +379,27 @@ impl StartCommand {
opts.user_provider.clone_from(&self.user_provider);
- Ok(opts)
+ Ok(())
}
#[allow(unreachable_code)]
#[allow(unused_variables)]
#[allow(clippy::diverging_sub_expression)]
- async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
- let guard =
- common_telemetry::init_global_logging(APP_NAME, &opts.logging, &opts.tracing, None);
+ async fn build(&self, opts: GreptimeOptions<StandaloneOptions>) -> Result<Instance> {
+ common_runtime::init_global_runtimes(&opts.runtime);
+
+ let guard = common_telemetry::init_global_logging(
+ APP_NAME,
+ &opts.component.logging,
+ &opts.component.tracing,
+ None,
+ );
log_versions(version!(), short_version!());
info!("Standalone start command: {:#?}", self);
- info!("Building standalone instance with {opts:#?}");
+ info!("Standalone options: {opts:#?}");
+ let opts = opts.component;
let mut fe_opts = opts.frontend_options();
#[allow(clippy::unnecessary_mut_passed)]
let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts) // mut ref is MUST, DO NOT change it
@@ -664,7 +681,10 @@ mod tests {
..Default::default()
};
- let options = cmd.load_options(&GlobalOptions::default()).unwrap();
+ let options = cmd
+ .load_options(&GlobalOptions::default())
+ .unwrap()
+ .component;
let fe_opts = options.frontend_options();
let dn_opts = options.datanode_options();
let logging_opts = options.logging;
@@ -725,7 +745,8 @@ mod tests {
#[cfg(feature = "tokio-console")]
tokio_console_addr: None,
})
- .unwrap();
+ .unwrap()
+ .component;
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
assert_eq!("debug", opts.logging.level.unwrap());
@@ -787,7 +808,7 @@ mod tests {
..Default::default()
};
- let opts = command.load_options(&GlobalOptions::default()).unwrap();
+ let opts = command.load_options(&Default::default()).unwrap().component;
// Should be read from env, env > default values.
assert_eq!(opts.logging.dir, "/other/log/dir");
diff --git a/src/cmd/tests/load_config_test.rs b/src/cmd/tests/load_config_test.rs
new file mode 100644
index 000000000000..80075b846e51
--- /dev/null
+++ b/src/cmd/tests/load_config_test.rs
@@ -0,0 +1,218 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::time::Duration;
+
+use cmd::options::GreptimeOptions;
+use cmd::standalone::StandaloneOptions;
+use common_config::Configurable;
+use common_runtime::global::RuntimeOptions;
+use common_telemetry::logging::LoggingOptions;
+use common_wal::config::raft_engine::RaftEngineConfig;
+use common_wal::config::{DatanodeWalConfig, StandaloneWalConfig};
+use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
+use frontend::frontend::FrontendOptions;
+use frontend::service_config::datanode::DatanodeClientOptions;
+use meta_client::MetaClientOptions;
+use meta_srv::metasrv::MetasrvOptions;
+use meta_srv::selector::SelectorType;
+use mito2::config::MitoConfig;
+use servers::export_metrics::ExportMetricsOption;
+
+#[test]
+fn test_load_datanode_example_config() {
+ let example_config = common_test_util::find_workspace_path("config/datanode.example.toml");
+ let options =
+ GreptimeOptions::<DatanodeOptions>::load_layered_options(example_config.to_str(), "")
+ .unwrap();
+
+ let expected = GreptimeOptions::<DatanodeOptions> {
+ runtime: RuntimeOptions {
+ read_rt_size: 8,
+ write_rt_size: 8,
+ bg_rt_size: 8,
+ },
+ component: DatanodeOptions {
+ node_id: Some(42),
+ rpc_hostname: Some("127.0.0.1".to_string()),
+ meta_client: Some(MetaClientOptions {
+ metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
+ timeout: Duration::from_secs(3),
+ heartbeat_timeout: Duration::from_millis(500),
+ ddl_timeout: Duration::from_secs(10),
+ connect_timeout: Duration::from_secs(1),
+ tcp_nodelay: true,
+ metadata_cache_max_capacity: 100000,
+ metadata_cache_ttl: Duration::from_secs(600),
+ metadata_cache_tti: Duration::from_secs(300),
+ }),
+ wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
+ dir: Some("/tmp/greptimedb/wal".to_string()),
+ sync_period: Some(Duration::from_secs(10)),
+ ..Default::default()
+ }),
+ storage: StorageConfig {
+ data_home: "/tmp/greptimedb/".to_string(),
+ ..Default::default()
+ },
+ region_engine: vec![RegionEngineConfig::Mito(MitoConfig {
+ num_workers: 8,
+ auto_flush_interval: Duration::from_secs(3600),
+ scan_parallelism: 0,
+ ..Default::default()
+ })],
+ logging: LoggingOptions {
+ level: Some("info".to_string()),
+ otlp_endpoint: Some("".to_string()),
+ tracing_sample_ratio: Some(Default::default()),
+ ..Default::default()
+ },
+ export_metrics: ExportMetricsOption {
+ self_import: Some(Default::default()),
+ remote_write: Some(Default::default()),
+ ..Default::default()
+ },
+ ..Default::default()
+ },
+ };
+
+ assert_eq!(options, expected);
+}
+
+#[test]
+fn test_load_frontend_example_config() {
+ let example_config = common_test_util::find_workspace_path("config/frontend.example.toml");
+ let options =
+ GreptimeOptions::<FrontendOptions>::load_layered_options(example_config.to_str(), "")
+ .unwrap();
+ let expected = GreptimeOptions::<FrontendOptions> {
+ runtime: RuntimeOptions {
+ read_rt_size: 8,
+ write_rt_size: 8,
+ bg_rt_size: 8,
+ },
+ component: FrontendOptions {
+ default_timezone: Some("UTC".to_string()),
+ meta_client: Some(MetaClientOptions {
+ metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
+ timeout: Duration::from_secs(3),
+ heartbeat_timeout: Duration::from_millis(500),
+ ddl_timeout: Duration::from_secs(10),
+ connect_timeout: Duration::from_secs(1),
+ tcp_nodelay: true,
+ metadata_cache_max_capacity: 100000,
+ metadata_cache_ttl: Duration::from_secs(600),
+ metadata_cache_tti: Duration::from_secs(300),
+ }),
+ logging: LoggingOptions {
+ level: Some("info".to_string()),
+ otlp_endpoint: Some("".to_string()),
+ tracing_sample_ratio: Some(Default::default()),
+ ..Default::default()
+ },
+ datanode: frontend::service_config::DatanodeOptions {
+ client: DatanodeClientOptions {
+ connect_timeout: Duration::from_secs(10),
+ tcp_nodelay: true,
+ },
+ },
+ export_metrics: ExportMetricsOption {
+ self_import: Some(Default::default()),
+ remote_write: Some(Default::default()),
+ ..Default::default()
+ },
+ ..Default::default()
+ },
+ };
+ assert_eq!(options, expected);
+}
+
+#[test]
+fn test_load_metasrv_example_config() {
+ let example_config = common_test_util::find_workspace_path("config/metasrv.example.toml");
+ let options =
+ GreptimeOptions::<MetasrvOptions>::load_layered_options(example_config.to_str(), "")
+ .unwrap();
+ let expected = GreptimeOptions::<MetasrvOptions> {
+ runtime: RuntimeOptions {
+ read_rt_size: 8,
+ write_rt_size: 8,
+ bg_rt_size: 8,
+ },
+ component: MetasrvOptions {
+ selector: SelectorType::LeaseBased,
+ data_home: "/tmp/metasrv/".to_string(),
+ logging: LoggingOptions {
+ dir: "/tmp/greptimedb/logs".to_string(),
+ level: Some("info".to_string()),
+ otlp_endpoint: Some("".to_string()),
+ tracing_sample_ratio: Some(Default::default()),
+ ..Default::default()
+ },
+ export_metrics: ExportMetricsOption {
+ self_import: Some(Default::default()),
+ remote_write: Some(Default::default()),
+ ..Default::default()
+ },
+ ..Default::default()
+ },
+ };
+ assert_eq!(options, expected);
+}
+
+#[test]
+fn test_load_standalone_example_config() {
+ let example_config = common_test_util::find_workspace_path("config/standalone.example.toml");
+ let options =
+ GreptimeOptions::<StandaloneOptions>::load_layered_options(example_config.to_str(), "")
+ .unwrap();
+ let expected = GreptimeOptions::<StandaloneOptions> {
+ runtime: RuntimeOptions {
+ read_rt_size: 8,
+ write_rt_size: 8,
+ bg_rt_size: 8,
+ },
+ component: StandaloneOptions {
+ default_timezone: Some("UTC".to_string()),
+ wal: StandaloneWalConfig::RaftEngine(RaftEngineConfig {
+ dir: Some("/tmp/greptimedb/wal".to_string()),
+ sync_period: Some(Duration::from_secs(10)),
+ ..Default::default()
+ }),
+ region_engine: vec![RegionEngineConfig::Mito(MitoConfig {
+ num_workers: 8,
+ auto_flush_interval: Duration::from_secs(3600),
+ scan_parallelism: 0,
+ ..Default::default()
+ })],
+ storage: StorageConfig {
+ data_home: "/tmp/greptimedb/".to_string(),
+ ..Default::default()
+ },
+ logging: LoggingOptions {
+ level: Some("info".to_string()),
+ otlp_endpoint: Some("".to_string()),
+ tracing_sample_ratio: Some(Default::default()),
+ ..Default::default()
+ },
+ export_metrics: ExportMetricsOption {
+ self_import: Some(Default::default()),
+ remote_write: Some(Default::default()),
+ ..Default::default()
+ },
+ ..Default::default()
+ },
+ };
+ assert_eq!(options, expected);
+}
diff --git a/src/common/config/src/config.rs b/src/common/config/src/config.rs
index c21735a059ea..e0816fbd5671 100644
--- a/src/common/config/src/config.rs
+++ b/src/common/config/src/config.rs
@@ -13,7 +13,8 @@
// limitations under the License.
use config::{Environment, File, FileFormat};
-use serde::{Deserialize, Serialize};
+use serde::de::DeserializeOwned;
+use serde::Serialize;
use snafu::ResultExt;
use crate::error::{LoadLayeredConfigSnafu, Result, SerdeJsonSnafu, TomlFormatSnafu};
@@ -25,7 +26,7 @@ pub const ENV_VAR_SEP: &str = "__";
pub const ENV_LIST_SEP: &str = ",";
/// Configuration trait defines the common interface for configuration that can be loaded from multiple sources and serialized to TOML.
-pub trait Configurable<'de>: Serialize + Deserialize<'de> + Default + Sized {
+pub trait Configurable: Serialize + DeserializeOwned + Default + Sized {
/// Load the configuration from multiple sources and merge them.
/// The precedence order is: config file > environment variables > default values.
/// `env_prefix` is the prefix of environment variables, e.g. "FRONTEND__xxx".
@@ -128,7 +129,7 @@ mod tests {
}
}
- impl Configurable<'_> for TestDatanodeConfig {
+ impl Configurable for TestDatanodeConfig {
fn env_list_keys() -> Option<&'static [&'static str]> {
Some(&["meta_client.metasrv_addrs"])
}
diff --git a/src/common/runtime/Cargo.toml b/src/common/runtime/Cargo.toml
index a6da1f571fc2..e5fa276c4bf1 100644
--- a/src/common/runtime/Cargo.toml
+++ b/src/common/runtime/Cargo.toml
@@ -13,13 +13,15 @@ common-error.workspace = true
common-macro.workspace = true
common-telemetry.workspace = true
lazy_static.workspace = true
+num_cpus.workspace = true
once_cell.workspace = true
paste.workspace = true
prometheus.workspace = true
+serde.workspace = true
snafu.workspace = true
tokio.workspace = true
tokio-metrics = "0.3"
-tokio-metrics-collector = "0.2"
+tokio-metrics-collector = { git = "https://github.com/MichaelScofield/tokio-metrics-collector.git", rev = "89d692d5753d28564a7aac73c6ac5aba22243ba0" }
tokio-util.workspace = true
[dev-dependencies]
diff --git a/src/common/runtime/src/global.rs b/src/common/runtime/src/global.rs
index 51bad13107c7..6b21851e1680 100644
--- a/src/common/runtime/src/global.rs
+++ b/src/common/runtime/src/global.rs
@@ -19,6 +19,7 @@ use std::sync::{Mutex, Once};
use common_telemetry::info;
use once_cell::sync::Lazy;
use paste::paste;
+use serde::{Deserialize, Serialize};
use crate::{Builder, JoinHandle, Runtime};
@@ -26,6 +27,28 @@ const READ_WORKERS: usize = 8;
const WRITE_WORKERS: usize = 8;
const BG_WORKERS: usize = 8;
+/// The options for the global runtimes.
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
+pub struct RuntimeOptions {
+ /// The number of threads to execute the runtime for global read operations.
+ pub read_rt_size: usize,
+ /// The number of threads to execute the runtime for global write operations.
+ pub write_rt_size: usize,
+ /// The number of threads to execute the runtime for global background operations.
+ pub bg_rt_size: usize,
+}
+
+impl Default for RuntimeOptions {
+ fn default() -> Self {
+ let cpus = num_cpus::get();
+ Self {
+ read_rt_size: cpus,
+ write_rt_size: cpus,
+ bg_rt_size: cpus,
+ }
+ }
+}
+
pub fn create_runtime(runtime_name: &str, thread_name: &str, worker_threads: usize) -> Runtime {
info!("Creating runtime with runtime_name: {runtime_name}, thread_name: {thread_name}, work_threads: {worker_threads}.");
Builder::default()
@@ -112,18 +135,26 @@ static CONFIG_RUNTIMES: Lazy<Mutex<ConfigRuntimes>> =
/// # Panics
/// Panics when the global runtimes are already initialized.
/// You should call this function before using any runtime functions.
-pub fn init_global_runtimes(
- read: Option<Runtime>,
- write: Option<Runtime>,
- background: Option<Runtime>,
-) {
+pub fn init_global_runtimes(options: &RuntimeOptions) {
static START: Once = Once::new();
START.call_once(move || {
let mut c = CONFIG_RUNTIMES.lock().unwrap();
assert!(!c.already_init, "Global runtimes already initialized");
- c.read_runtime = read;
- c.write_runtime = write;
- c.bg_runtime = background;
+ c.read_runtime = Some(create_runtime(
+ "global-read",
+ "global-read-worker",
+ options.read_rt_size,
+ ));
+ c.write_runtime = Some(create_runtime(
+ "global-write",
+ "global-write-worker",
+ options.write_rt_size,
+ ));
+ c.bg_runtime = Some(create_runtime(
+ "global-bg",
+ "global-bg-worker",
+ options.bg_rt_size,
+ ));
});
}
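With the new signature, callers no longer build the three global runtimes themselves; they pass thread counts and init_global_runtimes constructs the read, write, and background runtimes once. A minimal startup sketch mirroring the build() changes in the cmd crate (the sizes shown are the ones from the example configs; the default is the number of CPUs):

    use common_runtime::global::RuntimeOptions;

    let rt_opts = RuntimeOptions {
        read_rt_size: 8,
        write_rt_size: 8,
        bg_rt_size: 8,
    };
    // Call once at startup, before anything requests a global runtime;
    // the doc comment above notes it panics if they were already initialized.
    common_runtime::init_global_runtimes(&rt_opts);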
diff --git a/src/common/runtime/src/lib.rs b/src/common/runtime/src/lib.rs
index 08baed46cbd3..ba6f74c96cc6 100644
--- a/src/common/runtime/src/lib.rs
+++ b/src/common/runtime/src/lib.rs
@@ -13,7 +13,7 @@
// limitations under the License.
pub mod error;
-mod global;
+pub mod global;
mod metrics;
mod repeated_task;
pub mod runtime;
diff --git a/src/common/test-util/Cargo.toml b/src/common/test-util/Cargo.toml
index 2b66dd45ce3a..b8084a2a8b3e 100644
--- a/src/common/test-util/Cargo.toml
+++ b/src/common/test-util/Cargo.toml
@@ -8,7 +8,7 @@ license.workspace = true
workspace = true
[dependencies]
-client.workspace = true
+client = { workspace = true, features = ["testing"] }
common-query.workspace = true
common-recordbatch.workspace = true
once_cell.workspace = true
diff --git a/src/common/test-util/src/recordbatch.rs b/src/common/test-util/src/recordbatch.rs
index 47c949d40715..eb666e167a31 100644
--- a/src/common/test-util/src/recordbatch.rs
+++ b/src/common/test-util/src/recordbatch.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use client::Database;
use common_query::OutputData;
use common_recordbatch::util;
@@ -29,3 +30,25 @@ pub async fn check_output_stream(output: OutputData, expected: &str) {
let pretty_print = recordbatches.pretty_print().unwrap();
assert_eq!(pretty_print, expected, "actual: \n{}", pretty_print);
}
+
+pub async fn execute_and_check_output(db: &Database, sql: &str, expected: ExpectedOutput<'_>) {
+ let output = db.sql(sql).await.unwrap();
+ let output = output.data;
+
+ match (&output, expected) {
+ (OutputData::AffectedRows(x), ExpectedOutput::AffectedRows(y)) => {
+ assert_eq!(
+ *x, y,
+ r#"
+expected: {y}
+actual: {x}
+"#
+ )
+ }
+ (OutputData::RecordBatches(_), ExpectedOutput::QueryResult(x))
+ | (OutputData::Stream(_), ExpectedOutput::QueryResult(x)) => {
+ check_output_stream(output, x).await
+ }
+ _ => panic!(),
+ }
+}
diff --git a/src/datanode/src/config.rs b/src/datanode/src/config.rs
index ec278d3c4247..7e76c7d68169 100644
--- a/src/datanode/src/config.rs
+++ b/src/datanode/src/config.rs
@@ -15,7 +15,7 @@
//! Datanode configurations
use common_base::readable_size::ReadableSize;
-use common_base::secrets::SecretString;
+use common_base::secrets::{ExposeSecret, SecretString};
use common_config::Configurable;
use common_grpc::channel_manager::{
DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE, DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
@@ -38,7 +38,7 @@ pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize::mb(256);
const DEFAULT_DATA_HOME: &str = "/tmp/greptimedb";
/// Object storage config
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(tag = "type")]
pub enum ObjectStoreConfig {
File(FileConfig),
@@ -61,7 +61,7 @@ impl ObjectStoreConfig {
}
/// Storage engine config
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct StorageConfig {
/// The working directory of database
@@ -85,7 +85,7 @@ impl Default for StorageConfig {
#[serde(default)]
pub struct FileConfig {}
-#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq)]
#[serde(default)]
pub struct ObjectStorageCacheConfig {
/// The local file cache directory
@@ -109,6 +109,18 @@ pub struct S3Config {
pub cache: ObjectStorageCacheConfig,
}
+impl PartialEq for S3Config {
+ fn eq(&self, other: &Self) -> bool {
+ self.bucket == other.bucket
+ && self.root == other.root
+ && self.access_key_id.expose_secret() == other.access_key_id.expose_secret()
+ && self.secret_access_key.expose_secret() == other.secret_access_key.expose_secret()
+ && self.endpoint == other.endpoint
+ && self.region == other.region
+ && self.cache == other.cache
+ }
+}
+
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct OssConfig {
@@ -123,6 +135,17 @@ pub struct OssConfig {
pub cache: ObjectStorageCacheConfig,
}
+impl PartialEq for OssConfig {
+ fn eq(&self, other: &Self) -> bool {
+ self.bucket == other.bucket
+ && self.root == other.root
+ && self.access_key_id.expose_secret() == other.access_key_id.expose_secret()
+ && self.access_key_secret.expose_secret() == other.access_key_secret.expose_secret()
+ && self.endpoint == other.endpoint
+ && self.cache == other.cache
+ }
+}
+
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct AzblobConfig {
@@ -138,6 +161,18 @@ pub struct AzblobConfig {
pub cache: ObjectStorageCacheConfig,
}
+impl PartialEq for AzblobConfig {
+ fn eq(&self, other: &Self) -> bool {
+ self.container == other.container
+ && self.root == other.root
+ && self.account_name.expose_secret() == other.account_name.expose_secret()
+ && self.account_key.expose_secret() == other.account_key.expose_secret()
+ && self.endpoint == other.endpoint
+ && self.sas_token == other.sas_token
+ && self.cache == other.cache
+ }
+}
+
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct GcsConfig {
@@ -151,6 +186,17 @@ pub struct GcsConfig {
pub cache: ObjectStorageCacheConfig,
}
+impl PartialEq for GcsConfig {
+ fn eq(&self, other: &Self) -> bool {
+ self.root == other.root
+ && self.bucket == other.bucket
+ && self.scope == other.scope
+ && self.credential_path.expose_secret() == other.credential_path.expose_secret()
+ && self.endpoint == other.endpoint
+ && self.cache == other.cache
+ }
+}
+
impl Default for S3Config {
fn default() -> Self {
Self {
@@ -211,7 +257,7 @@ impl Default for ObjectStoreConfig {
}
}
-#[derive(Clone, Debug, Serialize, Deserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct DatanodeOptions {
pub mode: Mode,
@@ -267,7 +313,7 @@ impl Default for DatanodeOptions {
}
}
-impl Configurable<'_> for DatanodeOptions {
+impl Configurable for DatanodeOptions {
fn env_list_keys() -> Option<&'static [&'static str]> {
Some(&["meta_client.metasrv_addrs", "wal.broker_endpoints"])
}
diff --git a/src/frontend/src/frontend.rs b/src/frontend/src/frontend.rs
index f0dfac1c7d5c..7907ff20ffe0 100644
--- a/src/frontend/src/frontend.rs
+++ b/src/frontend/src/frontend.rs
@@ -74,7 +74,7 @@ impl Default for FrontendOptions {
}
}
-impl Configurable<'_> for FrontendOptions {
+impl Configurable for FrontendOptions {
fn env_list_keys() -> Option<&'static [&'static str]> {
Some(&["meta_client.metasrv_addrs"])
}
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index a1cc8934270f..c04770313a11 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -188,7 +188,7 @@ impl Instance {
pub fn build_servers(
&mut self,
- opts: impl Into<FrontendOptions> + for<'de> Configurable<'de>,
+ opts: impl Into<FrontendOptions> + Configurable,
servers: ServerHandlers,
) -> Result<()> {
let opts: FrontendOptions = opts.into();
diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs
index f5a0afb53016..268bc7db4ae6 100644
--- a/src/frontend/src/server.rs
+++ b/src/frontend/src/server.rs
@@ -39,7 +39,7 @@ use crate::service_config::GrpcOptions;
pub struct Services<T, U>
where
- T: Into<FrontendOptions> + for<'de> Configurable<'de> + Clone,
+ T: Into<FrontendOptions> + Configurable + Clone,
U: FrontendInstance,
{
opts: T,
@@ -51,7 +51,7 @@ where
impl<T, U> Services<T, U>
where
- T: Into<FrontendOptions> + for<'de> Configurable<'de> + Clone,
+ T: Into<FrontendOptions> + Configurable + Clone,
U: FrontendInstance,
{
pub fn new(opts: T, instance: Arc<U>, plugins: Plugins) -> Self {
diff --git a/src/frontend/src/service_config/datanode.rs b/src/frontend/src/service_config/datanode.rs
index ccf2b2ebf4c7..3b4de67b48c1 100644
--- a/src/frontend/src/service_config/datanode.rs
+++ b/src/frontend/src/service_config/datanode.rs
@@ -19,7 +19,7 @@ use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct DatanodeOptions {
- client: DatanodeClientOptions,
+ pub client: DatanodeClientOptions,
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 76fb794f797c..ce812cfba80f 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -148,7 +148,7 @@ impl Default for MetasrvOptions {
}
}
-impl Configurable<'_> for MetasrvOptions {
+impl Configurable for MetasrvOptions {
fn env_list_keys() -> Option<&'static [&'static str]> {
Some(&["wal.broker_endpoints"])
}
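
The "impl Configurable<'_>" to "impl Configurable" change repeats for DatanodeOptions, FrontendOptions, and MetasrvOptions, so callers no longer need "for<'de> Configurable<'de>" bounds. A hedged sketch of what a lifetime-free trait of this shape can look like; only env_list_keys is taken from the diff, and the rest of the real common_config::Configurable trait is assumed away:

// Hypothetical minimal shape of a lifetime-free Configurable trait.
trait Configurable {
    // Keys whose environment values should be parsed as comma-separated lists.
    fn env_list_keys() -> Option<&'static [&'static str]> {
        None
    }
}

#[derive(Default)]
struct MetasrvLikeOptions {
    broker_endpoints: Vec<String>,
}

impl Configurable for MetasrvLikeOptions {
    fn env_list_keys() -> Option<&'static [&'static str]> {
        Some(&["wal.broker_endpoints"])
    }
}

// A plain `T: Configurable` bound is enough here; no higher-ranked
// `for<'de> Configurable<'de>` lifetime is needed any more.
fn print_list_keys<T: Configurable>(_opts: &T) {
    for key in T::env_list_keys().unwrap_or(&[]) {
        println!("list-valued env key: {key}");
    }
}

fn main() {
    let opts = MetasrvLikeOptions::default();
    println!("{} broker endpoints configured", opts.broker_endpoints.len());
    print_list_keys(&opts);
}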
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index 7cbd640820b1..4c1b4641c9f4 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -21,16 +21,13 @@ use std::time::Duration;
use auth::UserProviderRef;
use axum::Router;
use catalog::kvbackend::KvBackendCatalogManager;
-use client::Database;
use common_base::secrets::ExposeSecret;
use common_config::Configurable;
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::schema_name::SchemaNameKey;
-use common_query::OutputData;
use common_runtime::Builder as RuntimeBuilder;
use common_telemetry::warn;
use common_test_util::ports;
-use common_test_util::recordbatch::{check_output_stream, ExpectedOutput};
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use common_wal::config::DatanodeWalConfig;
use datanode::config::{
@@ -690,25 +687,3 @@ where
test(endpoints).await
}
-
-pub async fn execute_and_check_output(db: &Database, sql: &str, expected: ExpectedOutput<'_>) {
- let output = db.sql(sql).await.unwrap();
- let output = output.data;
-
- match (&output, expected) {
- (OutputData::AffectedRows(x), ExpectedOutput::AffectedRows(y)) => {
- assert_eq!(
- *x, y,
- r#"
-expected: {y}
-actual: {x}
-"#
- )
- }
- (OutputData::RecordBatches(_), ExpectedOutput::QueryResult(x))
- | (OutputData::Stream(_), ExpectedOutput::QueryResult(x)) => {
- check_output_stream(output, x).await
- }
- _ => panic!(),
- }
-}
diff --git a/tests-integration/tests/grpc.rs b/tests-integration/tests/grpc.rs
index 7d1f9d57768f..33332170db16 100644
--- a/tests-integration/tests/grpc.rs
+++ b/tests-integration/tests/grpc.rs
@@ -28,6 +28,7 @@ use common_grpc::channel_manager::ClientTlsOption;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_runtime::Runtime;
+use common_test_util::find_workspace_path;
use servers::grpc::builder::GrpcServerBuilder;
use servers::grpc::GrpcServerConfig;
use servers::http::prometheus::{
@@ -732,10 +733,7 @@ async fn to_batch(output: Output) -> String {
}
pub async fn test_grpc_tls_config(store_type: StorageType) {
- let comm_dir = std::path::PathBuf::from_iter([
- std::env!("CARGO_RUSTC_CURRENT_DIR"),
- "src/common/grpc/tests/tls",
- ]);
+ let comm_dir = find_workspace_path("/src/common/grpc/tests/tls");
let ca_path = comm_dir.join("ca.pem").to_str().unwrap().to_string();
let server_cert_path = comm_dir.join("server.pem").to_str().unwrap().to_string();
let server_key_path = comm_dir.join("server.key").to_str().unwrap().to_string();
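
The grpc TLS test above switches from CARGO_RUSTC_CURRENT_DIR to a find_workspace_path helper. One hypothetical way such a helper could resolve workspace-relative paths is to walk up from the crate manifest directory until a Cargo.lock is found; this is an assumption for illustration, not the actual common_test_util implementation:

use std::path::PathBuf;

// A hypothetical approximation of the find_workspace_path helper used in
// the diff above: walk up from this crate's manifest directory until a
// Cargo.lock is found, then join the workspace-relative path. The actual
// common_test_util implementation may differ.
fn approx_workspace_path(relative: &str) -> PathBuf {
    let mut dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    while !dir.join("Cargo.lock").exists() {
        if !dir.pop() {
            break;
        }
    }
    dir.join(relative.trim_start_matches('/'))
}

fn main() {
    let tls_dir = approx_workspace_path("/src/common/grpc/tests/tls");
    println!("resolved tls dir: {}", tls_dir.display());
}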
| feat | set global runtime size by config file (#4063) |
| 443722597b9fb0447e50a66ebde16794cf0ac926 | 2024-04-22 16:40:24 | Ruihang Xia | ci: temporary disable compatibility test (#3767) | false |
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 753750ecda9c..16c85f40365b 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -330,20 +330,20 @@ jobs:
fail_ci_if_error: false
verbose: true
- compat:
- name: Compatibility Test
- needs: build
- runs-on: ubuntu-20.04
- timeout-minutes: 60
- steps:
- - uses: actions/checkout@v4
- - name: Download pre-built binaries
- uses: actions/download-artifact@v4
- with:
- name: bins
- path: .
- - name: Unzip binaries
- run: |
- mkdir -p ./bins/current
- tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
- - run: ./tests/compat/test-compat.sh 0.6.0
+ # compat:
+ # name: Compatibility Test
+ # needs: build
+ # runs-on: ubuntu-20.04
+ # timeout-minutes: 60
+ # steps:
+ # - uses: actions/checkout@v4
+ # - name: Download pre-built binaries
+ # uses: actions/download-artifact@v4
+ # with:
+ # name: bins
+ # path: .
+ # - name: Unzip binaries
+ # run: |
+ # mkdir -p ./bins/current
+ # tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
+ # - run: ./tests/compat/test-compat.sh 0.6.0
| ci | temporary disable compatibility test (#3767) |
| 1f1491e4299be862630e572a613ba83437f6a22e | 2024-03-06 18:45:48 | LFC | feat: impl some "set"s to adapt to some client apps (#3443) | false |
diff --git a/src/operator/src/statement.rs b/src/operator/src/statement.rs
index 42a44372fbad..b1948ce1e348 100644
--- a/src/operator/src/statement.rs
+++ b/src/operator/src/statement.rs
@@ -40,12 +40,13 @@ use query::plan::LogicalPlan;
use query::QueryEngineRef;
use session::context::QueryContextRef;
use session::table_name::table_idents_to_full_name;
-use snafu::{OptionExt, ResultExt};
+use snafu::{ensure, OptionExt, ResultExt};
use sql::statements::copy::{CopyDatabase, CopyDatabaseArgument, CopyTable, CopyTableArgument};
+use sql::statements::set_variables::SetVariables;
use sql::statements::statement::Statement;
use sql::statements::OptionMap;
use sql::util::format_raw_object_name;
-use sqlparser::ast::{Expr, ObjectName, Value};
+use sqlparser::ast::{Expr, Ident, ObjectName, Value};
use table::requests::{CopyDatabaseRequest, CopyDirection, CopyTableRequest};
use table::table_reference::TableReference;
use table::TableRef;
@@ -207,6 +208,22 @@ impl StatementExecutor {
let var_name = set_var.variable.to_string().to_uppercase();
match var_name.as_str() {
"TIMEZONE" | "TIME_ZONE" => set_timezone(set_var.value, query_ctx)?,
+
+ // Some postgresql client app may submit a "SET bytea_output" stmt upon connection.
+ // However, currently we lack the support for it (tracked in https://github.com/GreptimeTeam/greptimedb/issues/3438),
+ // so we just ignore it here instead of returning an error to break the connection.
+ // Since the "bytea_output" only determines the output format of binary values,
+ // it won't cause much trouble if we do so.
+ // TODO(#3438): Remove this temporary workaround after the feature is implemented.
+ "BYTEA_OUTPUT" => (),
+
+ // Same as "bytea_output", we just ignore it here.
+ // Not harmful since it only relates to how date is viewed in client app's output.
+ // The tracked issue is https://github.com/GreptimeTeam/greptimedb/issues/3442.
+ // TODO(#3442): Remove this temporary workaround after the feature is implemented.
+ "DATESTYLE" => (),
+
+ "CLIENT_ENCODING" => validate_client_encoding(set_var)?,
_ => {
return NotSupportedSnafu {
feat: format!("Unsupported set variable {}", var_name),
@@ -257,6 +274,39 @@ impl StatementExecutor {
}
}
+fn validate_client_encoding(set: SetVariables) -> Result<()> {
+ let Some((encoding, [])) = set.value.split_first() else {
+ return InvalidSqlSnafu {
+ err_msg: "must provide one and only one client encoding value",
+ }
+ .fail();
+ };
+ let encoding = match encoding {
+ Expr::Value(Value::SingleQuotedString(x))
+ | Expr::Identifier(Ident {
+ value: x,
+ quote_style: _,
+ }) => x.to_uppercase(),
+ _ => {
+ return InvalidSqlSnafu {
+ err_msg: format!("client encoding must be a string, actual: {:?}", encoding),
+ }
+ .fail();
+ }
+ };
+ // For the sake of simplicity, we only support "UTF8" ("UNICODE" is the alias for it,
+ // see https://www.postgresql.org/docs/current/multibyte.html#MULTIBYTE-CHARSET-SUPPORTED).
+ // "UTF8" is universal and sufficient for almost all cases.
+ // GreptimeDB itself is always using "UTF8" as the internal encoding.
+ ensure!(
+ encoding == "UTF8" || encoding == "UNICODE",
+ NotSupportedSnafu {
+ feat: format!("client encoding of '{}'", encoding)
+ }
+ );
+ Ok(())
+}
+
fn set_timezone(exprs: Vec<Expr>, ctx: QueryContextRef) -> Result<()> {
let tz_expr = exprs.first().context(NotSupportedSnafu {
feat: "No timezone find in set variable statement",
diff --git a/src/servers/src/mysql/federated.rs b/src/servers/src/mysql/federated.rs
index 5744894b9f91..ea3020cd7672 100644
--- a/src/servers/src/mysql/federated.rs
+++ b/src/servers/src/mysql/federated.rs
@@ -68,6 +68,7 @@ static OTHER_NOT_SUPPORTED_STMT: Lazy<RegexSet> = Lazy::new(|| {
"(?i)^(SET sql_mode(.*))",
"(?i)^(SET SQL_SELECT_LIMIT(.*))",
"(?i)^(SET @@(.*))",
+ "(?i)^(SET PROFILING(.*))",
"(?i)^(SHOW COLLATION)",
"(?i)^(SHOW CHARSET)",
| feat | impl some "set"s to adapt to some client apps (#3443) |
| aafb468547075d127ac9288fb339a10d769ad643 | 2024-05-29 05:36:13 | dennis zhuang | fix: set local or session time_zone not work (#4064) | false |
diff --git a/src/servers/src/mysql/federated.rs b/src/servers/src/mysql/federated.rs
index 13f8a63bb588..635e2819c7b3 100644
--- a/src/servers/src/mysql/federated.rs
+++ b/src/servers/src/mysql/federated.rs
@@ -72,7 +72,6 @@ static OTHER_NOT_SUPPORTED_STMT: Lazy<RegexSet> = Lazy::new(|| {
"(?i)^(SELECT \\$\\$)",
// mysqldump.
- "(?i)^(SET SESSION(.*))",
"(?i)^(SET SQL_QUOTE_SHOW_CREATE(.*))",
"(?i)^(LOCK TABLES(.*))",
"(?i)^(UNLOCK TABLES(.*))",
diff --git a/src/sql/src/parsers/set_var_parser.rs b/src/sql/src/parsers/set_var_parser.rs
index 8c2549aac56c..e4afd3e6cae6 100644
--- a/src/sql/src/parsers/set_var_parser.rs
+++ b/src/sql/src/parsers/set_var_parser.rs
@@ -15,6 +15,7 @@
use snafu::ResultExt;
use sqlparser::ast::Statement as SpStatement;
+use crate::ast::{Ident, ObjectName};
use crate::error::{self, Result};
use crate::parser::ParserContext;
use crate::statements::set_variables::SetVariables;
@@ -29,11 +30,18 @@ impl<'a> ParserContext<'a> {
SpStatement::SetVariable {
variable,
value,
- local,
hivevar,
- } if !local && !hivevar => {
- Ok(Statement::SetVariables(SetVariables { variable, value }))
- }
+ ..
+ } if !hivevar => Ok(Statement::SetVariables(SetVariables { variable, value })),
+
+ SpStatement::SetTimeZone { value, .. } => Ok(Statement::SetVariables(SetVariables {
+ variable: ObjectName(vec![Ident {
+ value: "TIMEZONE".to_string(),
+ quote_style: None,
+ }]),
+ value: vec![value],
+ })),
+
unexp => error::UnsupportedSnafu {
sql: self.sql.to_string(),
keyword: unexp.to_string(),
@@ -51,10 +59,7 @@ mod tests {
use crate::dialect::GreptimeDbDialect;
use crate::parser::ParseOptions;
- #[test]
- pub fn test_set_timezone() {
- // mysql style
- let sql = "SET time_zone = 'UTC'";
+ fn assert_mysql_parse_result(sql: &str) {
let result =
ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default());
let mut stmts = result.unwrap();
@@ -65,8 +70,9 @@ mod tests {
value: vec![Expr::Value(Value::SingleQuotedString("UTC".to_string()))]
})
);
- // postgresql style
- let sql = "SET TIMEZONE TO 'UTC'";
+ }
+
+ fn assert_pg_parse_result(sql: &str) {
let result =
ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default());
let mut stmts = result.unwrap();
@@ -78,4 +84,22 @@ mod tests {
})
);
}
+
+ #[test]
+ pub fn test_set_timezone() {
+ // mysql style
+ let sql = "SET time_zone = 'UTC'";
+ assert_mysql_parse_result(sql);
+ // session or local style
+ let sql = "SET LOCAL time_zone = 'UTC'";
+ assert_mysql_parse_result(sql);
+ let sql = "SET SESSION time_zone = 'UTC'";
+ assert_mysql_parse_result(sql);
+
+ // postgresql style
+ let sql = "SET TIMEZONE TO 'UTC'";
+ assert_pg_parse_result(sql);
+ let sql = "SET TIMEZONE 'UTC'";
+ assert_pg_parse_result(sql);
+ }
}
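
The parser change above folds "SET time_zone = ...", the SESSION/LOCAL variants, and the PostgreSQL "SET TIMEZONE TO ..." form into one SetVariables statement keyed by TIMEZONE. A rough, parser-free sketch of that normalization on raw SQL text; the real code pattern-matches sqlparser Statement variants instead of strings:

// Normalize the various SET-timezone spellings to ("TIMEZONE", value).
// String-level illustration only; the real parser matches sqlparser
// Statement variants (SetVariable, SetTimeZone) as in the diff above.
fn normalize_set_timezone(sql: &str) -> Option<(String, String)> {
    let lowered = sql.trim().trim_end_matches(';').to_lowercase();

    // MySQL style, optionally prefixed with SESSION or LOCAL.
    for prefix in ["set time_zone", "set session time_zone", "set local time_zone"] {
        if let Some(rest) = lowered.strip_prefix(prefix) {
            let value = rest.trim_start_matches(|c| c == ' ' || c == '=').trim().to_string();
            return Some(("TIMEZONE".to_string(), value));
        }
    }

    // PostgreSQL style: SET TIMEZONE TO 'UTC' or SET TIMEZONE 'UTC'.
    if let Some(rest) = lowered.strip_prefix("set timezone") {
        let value = rest.trim_start().trim_start_matches("to ").trim().to_string();
        return Some(("TIMEZONE".to_string(), value));
    }

    None
}

fn main() {
    for sql in [
        "SET time_zone = 'UTC'",
        "SET SESSION time_zone = 'UTC'",
        "SET LOCAL time_zone = 'UTC'",
        "SET TIMEZONE TO 'UTC'",
        "SET TIMEZONE 'UTC'",
    ] {
        assert_eq!(
            normalize_set_timezone(sql),
            Some(("TIMEZONE".to_string(), "'utc'".to_string()))
        );
    }
    println!("all spellings normalize to the TIMEZONE variable");
}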
diff --git a/tests/cases/standalone/common/system/timezone.result b/tests/cases/standalone/common/system/timezone.result
index 586e8ba4fab9..26b15544f82e 100644
--- a/tests/cases/standalone/common/system/timezone.result
+++ b/tests/cases/standalone/common/system/timezone.result
@@ -187,7 +187,7 @@ select to_unixtime('2024-01-02 00:00:00+08:00');
+------------------------------------------------+
--- UTC-8 ---
-SET TIME_ZONE = '-8:00';
+SET SESSION TIME_ZONE = '-8:00';
Affected Rows: 0
@@ -281,7 +281,7 @@ drop table test;
Affected Rows: 0
-- revert timezone to UTC
-SET TIME_ZONE = 'UTC';
+SET LOCAL TIME_ZONE = 'UTC';
Affected Rows: 0
diff --git a/tests/cases/standalone/common/system/timezone.sql b/tests/cases/standalone/common/system/timezone.sql
index 0bd2a9c91352..828d29421e6d 100644
--- a/tests/cases/standalone/common/system/timezone.sql
+++ b/tests/cases/standalone/common/system/timezone.sql
@@ -48,7 +48,7 @@ select to_unixtime('2024-01-02 00:00:00');
select to_unixtime('2024-01-02 00:00:00+08:00');
--- UTC-8 ---
-SET TIME_ZONE = '-8:00';
+SET SESSION TIME_ZONE = '-8:00';
SHOW VARIABLES time_zone;
@@ -71,7 +71,7 @@ select to_unixtime('2024-01-02 00:00:00+08:00');
drop table test;
-- revert timezone to UTC
-SET TIME_ZONE = 'UTC';
+SET LOCAL TIME_ZONE = 'UTC';
SHOW VARIABLES time_zone;
diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index ea3e3e1bc10a..151d395dc886 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -436,7 +436,9 @@ impl Database for GreptimeDB {
let mut client = self.client.lock().await;
- if query.trim().to_lowercase().starts_with("use ") {
+ let query_str = query.trim().to_lowercase();
+
+ if query_str.starts_with("use ") {
// use [db]
let database = query
.split_ascii_whitespace()
@@ -447,7 +449,10 @@ impl Database for GreptimeDB {
Box::new(ResultDisplayer {
result: Ok(Output::new_with_affected_rows(0)),
}) as _
- } else if query.trim().to_lowercase().starts_with("set time_zone") {
+ } else if query_str.starts_with("set time_zone")
+ || query_str.starts_with("set session time_zone")
+ || query_str.starts_with("set local time_zone")
+ {
// set time_zone='xxx'
let timezone = query
.split('=')
| fix | set local or session time_zone not work (#4064) |
| 22c8a7656bf1e2da22c02d3606b0f29bf25ba0ce | 2024-11-14 15:48:50 | zyy17 | chore: update cluster dashboard (#4995) | false |
diff --git a/grafana/greptimedb-cluster.json b/grafana/greptimedb-cluster.json
index 72d5a862df16..1fcfb2ecad97 100644
--- a/grafana/greptimedb-cluster.json
+++ b/grafana/greptimedb-cluster.json
@@ -25,6 +25,7 @@
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
+ "id": 4,
"links": [],
"liveNow": false,
"panels": [
@@ -1628,7 +1629,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1753,7 +1755,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1767,7 +1770,7 @@
},
"gridPos": {
"h": 10,
- "w": 9,
+ "w": 8,
"x": 8,
"y": 24
},
@@ -1878,7 +1881,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1892,8 +1896,8 @@
},
"gridPos": {
"h": 10,
- "w": 7,
- "x": 17,
+ "w": 8,
+ "x": 16,
"y": 24
},
"id": 235,
@@ -1956,24 +1960,12 @@
"title": "Metasrv Memory",
"type": "timeseries"
},
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 34
- },
- "id": 192,
- "panels": [],
- "title": "Frontend APIs",
- "type": "row"
- },
{
"datasource": {
"type": "prometheus",
"uid": "${metrics}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -2016,7 +2008,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -2024,20 +2017,22 @@
}
]
},
- "unit": "ops"
+ "unit": "decbytes"
},
"overrides": []
},
"gridPos": {
- "h": 8,
- "w": 12,
+ "h": 10,
+ "w": 8,
"x": 0,
- "y": 35
+ "y": 34
},
- "id": 202,
+ "id": 256,
"options": {
"legend": {
- "calcs": [],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
@@ -2054,14 +2049,14 @@
"uid": "${metrics}"
},
"editorMode": "code",
- "expr": "sum by(pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{pod=~\"$frontend\",path!~\"/health|/metrics\"}[$__rate_interval]))",
+ "expr": "sum(sys_jemalloc_resident{pod=~\"$datanode\"}) by (pod)",
"instant": false,
- "legendFormat": "[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-qps",
+ "legendFormat": "[{{ pod }}]-resident",
"range": true,
"refId": "A"
}
],
- "title": "HTTP QPS per Instance",
+ "title": "Datanode Memory per Instance",
"type": "timeseries"
},
{
@@ -2069,6 +2064,7 @@
"type": "prometheus",
"uid": "${metrics}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -2081,7 +2077,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
- "drawStyle": "points",
+ "drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
@@ -2111,7 +2107,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -2119,20 +2116,22 @@
}
]
},
- "unit": "s"
+ "unit": "decbytes"
},
"overrides": []
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 35
+ "h": 10,
+ "w": 8,
+ "x": 8,
+ "y": 34
},
- "id": 203,
+ "id": 257,
"options": {
"legend": {
- "calcs": [],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
@@ -2149,14 +2148,14 @@
"uid": "${metrics}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{pod=~\"$frontend\",path!~\"/health|/metrics\"}[$__rate_interval])))",
+ "expr": "sum(sys_jemalloc_resident{pod=~\"$frontend\"}) by (pod)",
"instant": false,
- "legendFormat": "[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99",
+ "legendFormat": "[{{ pod }}]-resident",
"range": true,
"refId": "A"
}
],
- "title": "HTTP P99 per Instance",
+ "title": "Frontend Memory per Instance",
"type": "timeseries"
},
{
@@ -2164,6 +2163,7 @@
"type": "prometheus",
"uid": "${metrics}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -2206,7 +2206,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -2214,45 +2215,22 @@
}
]
},
- "unit": "ops"
+ "unit": "decbytes"
},
- "overrides": [
- {
- "__systemRef": "hideSeriesFrom",
- "matcher": {
- "id": "byNames",
- "options": {
- "mode": "exclude",
- "names": [
- "[mycluster-frontend-5f94445cf8-mcmhf]-[/v1/prometheus/write]-[POST]-[204]-qps"
- ],
- "prefix": "All except:",
- "readOnly": true
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": false,
- "tooltip": false,
- "viz": true
- }
- }
- ]
- }
- ]
+ "overrides": []
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 43
+ "h": 10,
+ "w": 8,
+ "x": 16,
+ "y": 34
},
- "id": 211,
+ "id": 258,
"options": {
"legend": {
- "calcs": [],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
@@ -2269,14 +2247,14 @@
"uid": "${metrics}"
},
"editorMode": "code",
- "expr": "sum by(pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{pod=~\"$frontend\"}[$__rate_interval]))",
+ "expr": "sum(sys_jemalloc_resident{pod=~\"$metasrv\"}) by (pod)",
"instant": false,
- "legendFormat": "[{{pod}}]-[{{path}}]-[{{code}}]-qps",
+ "legendFormat": "[{{ pod }}]-resident",
"range": true,
"refId": "A"
}
],
- "title": "gRPC QPS per Instance",
+ "title": "Metasrv Memory per Instance",
"type": "timeseries"
},
{
@@ -2284,6 +2262,7 @@
"type": "prometheus",
"uid": "${metrics}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -2296,7 +2275,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
- "drawStyle": "points",
+ "drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
@@ -2326,7 +2305,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -2334,23 +2314,27 @@
}
]
},
- "unit": "s"
+ "unit": "percentunit"
},
"overrides": []
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 43
+ "h": 10,
+ "w": 8,
+ "x": 0,
+ "y": 44
},
- "id": 212,
+ "id": 259,
"options": {
"legend": {
- "calcs": [],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
- "showLegend": true
+ "showLegend": true,
+ "sortBy": "Last *",
+ "sortDesc": true
},
"tooltip": {
"mode": "single",
@@ -2364,14 +2348,14 @@
"uid": "${metrics}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{pod=~\"$frontend\"}[$__rate_interval])))",
+ "expr": "sum(rate(process_cpu_seconds_total{pod=~\"$datanode\"}[$__rate_interval]))",
"instant": false,
- "legendFormat": "[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99",
+ "legendFormat": "cpu",
"range": true,
"refId": "A"
}
],
- "title": "gRPC P99 per Instance",
+ "title": "Datanode CPU Usage",
"type": "timeseries"
},
{
@@ -2379,6 +2363,7 @@
"type": "prometheus",
"uid": "${metrics}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -2421,7 +2406,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -2429,48 +2415,27 @@
}
]
},
- "unit": "ops"
+ "unit": "percentunit"
},
- "overrides": [
- {
- "__systemRef": "hideSeriesFrom",
- "matcher": {
- "id": "byNames",
- "options": {
- "mode": "exclude",
- "names": [
- "[mycluster-frontend-5c59b4cc9b-kpb6q]-qps"
- ],
- "prefix": "All except:",
- "readOnly": true
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": false,
- "tooltip": false,
- "viz": true
- }
- }
- ]
- }
- ]
+ "overrides": []
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 51
+ "h": 10,
+ "w": 8,
+ "x": 8,
+ "y": 44
},
- "id": 213,
+ "id": 260,
"options": {
"legend": {
- "calcs": [],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
- "showLegend": true
+ "showLegend": true,
+ "sortBy": "Last *",
+ "sortDesc": true
},
"tooltip": {
"mode": "single",
@@ -2484,14 +2449,14 @@
"uid": "${metrics}"
},
"editorMode": "code",
- "expr": "sum by(pod)(rate(greptime_servers_mysql_query_elapsed_count{pod=~\"$frontend\"}[$__rate_interval]))",
+ "expr": "sum(rate(process_cpu_seconds_total{pod=~\"$frontend\"}[$__rate_interval]))",
"instant": false,
- "legendFormat": "[{{pod}}]-qps",
+ "legendFormat": "cpu",
"range": true,
"refId": "A"
}
],
- "title": "MySQL QPS per Instance",
+ "title": "Frontend CPU Usage",
"type": "timeseries"
},
{
@@ -2499,6 +2464,7 @@
"type": "prometheus",
"uid": "${metrics}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -2511,7 +2477,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
- "drawStyle": "points",
+ "drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
@@ -2541,7 +2507,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -2549,23 +2516,27 @@
}
]
},
- "unit": "s"
+ "unit": "percentunit"
},
"overrides": []
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 51
+ "h": 10,
+ "w": 8,
+ "x": 16,
+ "y": 44
},
- "id": 214,
+ "id": 261,
"options": {
"legend": {
- "calcs": [],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
- "showLegend": true
+ "showLegend": true,
+ "sortBy": "Last *",
+ "sortDesc": true
},
"tooltip": {
"mode": "single",
@@ -2579,15 +2550,14 @@
"uid": "${metrics}"
},
"editorMode": "code",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(pod) (rate(greptime_servers_mysql_query_elapsed_bucket{pod=~\"$frontend\"}[$__rate_interval])))",
+ "expr": "sum(rate(process_cpu_seconds_total{pod=~\"$metasrv\"}[$__rate_interval]))",
"instant": false,
- "legendFormat": "[{{pod}}]-p99",
+ "legendFormat": "cpu",
"range": true,
"refId": "A"
}
],
- "title": "MySQL P99 per Instance",
+ "title": "Metasrv CPU Usage",
"type": "timeseries"
},
{
@@ -2595,6 +2565,7 @@
"type": "prometheus",
"uid": "${metrics}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -2637,7 +2608,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -2645,48 +2617,27 @@
}
]
},
- "unit": "ops"
+ "unit": "percentunit"
},
- "overrides": [
- {
- "__systemRef": "hideSeriesFrom",
- "matcher": {
- "id": "byNames",
- "options": {
- "mode": "exclude",
- "names": [
- "[mycluster-frontend-5f94445cf8-mcmhf]-[/v1/prometheus/write]-[POST]-[204]-qps"
- ],
- "prefix": "All except:",
- "readOnly": true
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": false,
- "tooltip": false,
- "viz": true
- }
- }
- ]
- }
- ]
+ "overrides": []
},
"gridPos": {
- "h": 8,
- "w": 12,
+ "h": 10,
+ "w": 8,
"x": 0,
- "y": 59
+ "y": 54
},
- "id": 215,
+ "id": 262,
"options": {
"legend": {
- "calcs": [],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
- "showLegend": true
+ "showLegend": true,
+ "sortBy": "Last *",
+ "sortDesc": true
},
"tooltip": {
"mode": "single",
@@ -2700,14 +2651,638 @@
"uid": "${metrics}"
},
"editorMode": "code",
- "expr": "sum by(pod) (rate(greptime_servers_postgres_query_elapsed_count{pod=~\"$frontend\"}[$__rate_interval]))",
+ "expr": "sum(rate(process_cpu_seconds_total{pod=~\"$datanode\"}[$__rate_interval])) by (pod)",
+ "instant": false,
+ "legendFormat": "[{{ pod }}]-cpu",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Datanode CPU Usage per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 8,
+ "x": 8,
+ "y": 54
+ },
+ "id": 263,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Last *",
+ "sortDesc": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(process_cpu_seconds_total{pod=~\"$frontend\"}[$__rate_interval])) by (pod)",
+ "instant": false,
+ "legendFormat": "[{{ pod }}]-cpu",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Frontend CPU Usage per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 8,
+ "x": 16,
+ "y": 54
+ },
+ "id": 264,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Last *",
+ "sortDesc": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(process_cpu_seconds_total{pod=~\"$metasrv\"}[$__rate_interval])) by (pod)",
+ "instant": false,
+ "legendFormat": "[{{ pod }}]-cpu",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Metasrv CPU Usage per Instance",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 64
+ },
+ "id": 192,
+ "panels": [],
+ "title": "Frontend APIs",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 65
+ },
+ "id": 202,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "editorMode": "code",
+ "expr": "sum by(pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{pod=~\"$frontend\",path!~\"/health|/metrics\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-qps",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "HTTP QPS per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 65
+ },
+ "id": 203,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{pod=~\"$frontend\",path!~\"/health|/metrics\"}[$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "HTTP P99 per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "[mycluster-frontend-5f94445cf8-mcmhf]-[/v1/prometheus/write]-[POST]-[204]-qps"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": false,
+ "tooltip": false,
+ "viz": true
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 73
+ },
+ "id": 211,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "editorMode": "code",
+ "expr": "sum by(pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{pod=~\"$frontend\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-[{{path}}]-[{{code}}]-qps",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "gRPC QPS per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 73
+ },
+ "id": 212,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{pod=~\"$frontend\"}[$__rate_interval])))",
"instant": false,
- "legendFormat": "[{{pod}}]-qps",
+ "legendFormat": "[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99",
"range": true,
"refId": "A"
}
],
- "title": "PostgreSQL QPS per Instance",
+ "title": "gRPC P99 per Instance",
"type": "timeseries"
},
{
@@ -2727,7 +3302,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
- "drawStyle": "points",
+ "drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
@@ -2757,7 +3332,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -2765,17 +3341,42 @@
}
]
},
- "unit": "s"
+ "unit": "ops"
},
- "overrides": []
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "[mycluster-frontend-5c59b4cc9b-kpb6q]-qps"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": false,
+ "tooltip": false,
+ "viz": true
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 8,
"w": 12,
- "x": 12,
- "y": 59
+ "x": 0,
+ "y": 81
},
- "id": 216,
+ "id": 213,
"options": {
"legend": {
"calcs": [],
@@ -2795,29 +3396,16 @@
"uid": "${metrics}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(pod) (rate(greptime_servers_postgres_query_elapsed_count{pod=~\"$frontend\"}[$__rate_interval])))",
+ "expr": "sum by(pod)(rate(greptime_servers_mysql_query_elapsed_count{pod=~\"$frontend\"}[$__rate_interval]))",
"instant": false,
- "legendFormat": "[{{pod}}]-p99",
+ "legendFormat": "[{{pod}}]-qps",
"range": true,
"refId": "A"
}
],
- "title": "PostgreSQL P99 per Instance",
+ "title": "MySQL QPS per Instance",
"type": "timeseries"
},
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 67
- },
- "id": 217,
- "panels": [],
- "title": "Frontend <-> Datanode",
- "type": "row"
- },
{
"datasource": {
"type": "prometheus",
@@ -2835,7 +3423,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
- "drawStyle": "line",
+ "drawStyle": "points",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
@@ -2865,7 +3453,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -2873,22 +3462,20 @@
}
]
},
- "unit": "rowsps"
+ "unit": "s"
},
"overrides": []
},
"gridPos": {
- "h": 6,
- "w": 24,
- "x": 0,
- "y": 68
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 81
},
- "id": 218,
+ "id": 214,
"options": {
"legend": {
- "calcs": [
- "lastNotNull"
- ],
+ "calcs": [],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
@@ -2905,14 +3492,15 @@
"uid": "${metrics}"
},
"editorMode": "code",
- "expr": "sum by(pod)(rate(greptime_table_operator_ingest_rows{pod=~\"$frontend\"}[$__rate_interval]))",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(pod, le) (rate(greptime_servers_mysql_query_elapsed_bucket{pod=~\"$frontend\"}[$__rate_interval])))",
"instant": false,
- "legendFormat": "[{{pod}}]-rps",
+ "legendFormat": "[{{ pod }}]-p99",
"range": true,
"refId": "A"
}
],
- "title": "Ingest Rows per Instance",
+ "title": "MySQL P99 per Instance",
"type": "timeseries"
},
{
@@ -2962,7 +3550,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -2972,19 +3561,44 @@
},
"unit": "ops"
},
- "overrides": []
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "[mycluster-frontend-5f94445cf8-mcmhf]-[/v1/prometheus/write]-[POST]-[204]-qps"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": false,
+ "tooltip": false,
+ "viz": true
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
- "y": 74
+ "y": 89
},
- "id": 219,
+ "id": 215,
"options": {
"legend": {
"calcs": [],
- "displayMode": "list",
+ "displayMode": "table",
"placement": "bottom",
"showLegend": true
},
@@ -3000,14 +3614,14 @@
"uid": "${metrics}"
},
"editorMode": "code",
- "expr": "sum by(pod, request_type) (rate(greptime_grpc_region_request_count{pod=~\"$frontend\"}[$__rate_interval]))",
+ "expr": "sum by(pod) (rate(greptime_servers_postgres_query_elapsed_count{pod=~\"$frontend\"}[$__rate_interval]))",
"instant": false,
- "legendFormat": "[{{pod}}]-[{{request_type}}]-qps",
+ "legendFormat": "[{{pod}}]-qps",
"range": true,
"refId": "A"
}
],
- "title": "Region Call QPS per Instance",
+ "title": "PostgreSQL QPS per Instance",
"type": "timeseries"
},
{
@@ -3015,7 +3629,6 @@
"type": "prometheus",
"uid": "${metrics}"
},
- "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -3058,7 +3671,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3074,9 +3688,9 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 74
+ "y": 89
},
- "id": 220,
+ "id": 216,
"options": {
"legend": {
"calcs": [],
@@ -3096,23 +3710,325 @@
"uid": "${metrics}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(pod, le, request_type) (rate(greptime_grpc_region_request_bucket{pod=~\"$frontend\"}[$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by(pod, le) (rate(greptime_servers_postgres_query_elapsed_count{pod=~\"$frontend\"}[$__rate_interval])))",
"instant": false,
- "legendFormat": "[{{pod}}]-[{{request_type}}]-p99",
+ "legendFormat": "[{{pod}}]-p99",
"range": true,
"refId": "A"
}
],
- "title": "Region Call P99 per Instance",
+ "title": "PostgreSQL P99 per Instance",
"type": "timeseries"
},
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 97
+ },
+ "id": 217,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "rowsps"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 24,
+ "x": 0,
+ "y": 26
+ },
+ "id": 218,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "editorMode": "code",
+ "expr": "sum by(pod)(rate(greptime_table_operator_ingest_rows{pod=~\"$frontend\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-rps",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Ingest Rows per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 32
+ },
+ "id": 219,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "editorMode": "code",
+ "expr": "sum by(pod, request_type) (rate(greptime_grpc_region_request_count{pod=~\"$frontend\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-[{{request_type}}]-qps",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Region Call QPS per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 32
+ },
+ "id": 220,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${metrics}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(pod, le, request_type) (rate(greptime_grpc_region_request_bucket{pod=~\"$frontend\"}[$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-[{{request_type}}]-p99",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Region Call P99 per Instance",
+ "type": "timeseries"
+ }
+ ],
+ "title": "Frontend <-> Datanode",
+ "type": "row"
+ },
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 82
+ "y": 98
},
"id": 194,
"panels": [],
@@ -3166,7 +4082,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3182,7 +4099,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 83
+ "y": 99
},
"id": 201,
"options": {
@@ -3261,7 +4178,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3277,7 +4195,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 83
+ "y": 99
},
"id": 222,
"options": {
@@ -3356,7 +4274,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3372,7 +4291,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 91
+ "y": 107
},
"id": 200,
"options": {
@@ -3451,7 +4370,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3467,7 +4387,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 91
+ "y": 107
},
"id": 221,
"options": {
@@ -3546,7 +4466,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3562,7 +4483,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 99
+ "y": 115
},
"id": 224,
"options": {
@@ -3641,7 +4562,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3657,7 +4579,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 99
+ "y": 115
},
"id": 229,
"options": {
@@ -3736,7 +4658,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3752,7 +4675,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 107
+ "y": 123
},
"id": 227,
"options": {
@@ -3833,7 +4756,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3849,7 +4773,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 107
+ "y": 123
},
"id": 228,
"options": {
@@ -3930,7 +4854,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3946,7 +4871,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 115
+ "y": 131
},
"id": 231,
"options": {
@@ -4025,7 +4950,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4041,7 +4967,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 115
+ "y": 131
},
"id": 230,
"options": {
@@ -4122,7 +5048,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4138,7 +5065,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 123
+ "y": 139
},
"id": 225,
"options": {
@@ -4219,7 +5146,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4235,7 +5163,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 123
+ "y": 139
},
"id": 232,
"options": {
@@ -4275,7 +5203,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 131
+ "y": 147
},
"id": 195,
"panels": [],
@@ -4329,7 +5257,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4345,7 +5274,7 @@
"h": 10,
"w": 24,
"x": 0,
- "y": 132
+ "y": 148
},
"id": 209,
"options": {
@@ -4424,7 +5353,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4440,7 +5370,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 142
+ "y": 158
},
"id": 196,
"options": {
@@ -4519,7 +5449,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4535,7 +5466,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 142
+ "y": 158
},
"id": 198,
"options": {
@@ -4614,7 +5545,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4630,7 +5562,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 149
+ "y": 165
},
"id": 199,
"options": {
@@ -4709,7 +5641,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4725,7 +5658,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 149
+ "y": 165
},
"id": 204,
"options": {
@@ -4804,7 +5737,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4820,7 +5754,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 156
+ "y": 172
},
"id": 205,
"options": {
@@ -4900,7 +5834,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4916,7 +5851,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 156
+ "y": 172
},
"id": 206,
"options": {
@@ -4996,7 +5931,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -5012,7 +5948,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 163
+ "y": 179
},
"id": 207,
"options": {
@@ -5091,7 +6027,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -5107,7 +6044,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 163
+ "y": 179
},
"id": 210,
"options": {
@@ -5140,7 +6077,7 @@
"type": "timeseries"
}
],
- "refresh": "",
+ "refresh": "10s",
"schemaVersion": 39,
"tags": [],
"templating": {
@@ -5226,7 +6163,7 @@
"type": "prometheus",
"uid": "${metrics}"
},
- "definition": "label_values(greptime_app_version{app=~\"$role\"},pod)",
+ "definition": "label_values(greptime_app_version{app=~\"$roles\"},pod)",
"hide": 0,
"includeAll": true,
"multi": true,
@@ -5234,7 +6171,7 @@
"options": [],
"query": {
"qryType": 1,
- "query": "label_values(greptime_app_version{app=~\"$role\"},pod)",
+ "query": "label_values(greptime_app_version{app=~\"$roles\"},pod)",
"refId": "PrometheusVariableQueryEditor-VariableQuery"
},
"refresh": 1,
@@ -5363,4 +6300,4 @@
"uid": "ce3q6xwn3xa0wa",
"version": 1,
"weekStart": ""
-}
+}
\ No newline at end of file
| chore | update cluster dashboard (#4995) |
| 8087822ab2f37899d7dda8eac1c0cb711c4c13f1 | 2024-02-26 12:20:55 | Lei, HUANG | refactor: change the receivers of merge tree components (#3378) | false |
diff --git a/src/mito2/src/memtable.rs b/src/mito2/src/memtable.rs
index 81750de4db4a..c5cd32bf0ec5 100644
--- a/src/mito2/src/memtable.rs
+++ b/src/mito2/src/memtable.rs
@@ -15,7 +15,6 @@
//! Memtables are write buffers for regions.
pub mod key_values;
-#[allow(dead_code)]
pub mod merge_tree;
pub mod time_series;
pub(crate) mod version;
diff --git a/src/mito2/src/memtable/merge_tree/data.rs b/src/mito2/src/memtable/merge_tree/data.rs
index 71ee32bb1b16..343ed8e2884a 100644
--- a/src/mito2/src/memtable/merge_tree/data.rs
+++ b/src/mito2/src/memtable/merge_tree/data.rs
@@ -30,7 +30,7 @@ use datatypes::types::TimestampType;
use datatypes::vectors::{
TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector,
TimestampSecondVector, UInt16Vector, UInt16VectorBuilder, UInt64Vector, UInt64VectorBuilder,
- UInt8VectorBuilder,
+ UInt8Vector, UInt8VectorBuilder,
};
use parquet::arrow::arrow_reader::{ParquetRecordBatchReader, ParquetRecordBatchReaderBuilder};
use parquet::arrow::ArrowWriter;
@@ -65,10 +65,6 @@ impl DataBatchRange {
pub(crate) fn len(&self) -> usize {
self.end - self.start
}
-
- pub(crate) fn is_empty(&self) -> bool {
- self.len() == 0
- }
}
/// Data part batches returns by `DataParts::read`.
@@ -89,10 +85,6 @@ impl<'a> DataBatch<'a> {
self.range
}
- pub(crate) fn is_empty(&self) -> bool {
- self.range.is_empty()
- }
-
pub(crate) fn slice_record_batch(&self) -> RecordBatch {
self.rb.slice(self.range.start, self.range.len())
}
@@ -258,12 +250,11 @@ impl DataBuffer {
/// Reads batches from data buffer without resetting builder's buffers.
/// If pk_weights is present, yielded rows are sorted according to weights,
/// otherwise rows are sorted by "pk_weights" values as they are actually weights.
- pub fn read(&mut self, pk_weights: Option<&[u16]>) -> Result<DataBufferReader> {
- let batch = data_buffer_to_record_batches(
+ pub fn read(&self, pk_weights: Option<&[u16]>) -> Result<DataBufferReader> {
+ let batch = read_data_buffer_to_record_batches(
self.data_part_schema.clone(),
self,
pk_weights,
- true,
self.dedup,
// replace_pk_index is always set to false since:
// - for DataBuffer in ShardBuilder, pk dict is not frozen
@@ -307,36 +298,115 @@ impl LazyMutableVectorBuilder {
}
/// Converts `DataBuffer` to record batches, with rows sorted according to pk_weights.
-/// `keep_data`: whether to keep the original data inside `DataBuffer`.
/// `dedup`: whether to true to remove the duplicated rows inside `DataBuffer`.
/// `replace_pk_index`: whether to replace the pk_index values with corresponding pk weight.
-fn data_buffer_to_record_batches(
+fn drain_data_buffer_to_record_batches(
schema: SchemaRef,
buffer: &mut DataBuffer,
pk_weights: Option<&[u16]>,
- keep_data: bool,
dedup: bool,
replace_pk_index: bool,
) -> Result<RecordBatch> {
let num_rows = buffer.ts_builder.len();
- let (pk_index_v, ts_v, sequence_v, op_type_v) = if keep_data {
- (
- buffer.pk_index_builder.finish_cloned(),
- buffer.ts_builder.to_vector_cloned(),
- buffer.sequence_builder.finish_cloned(),
- buffer.op_type_builder.finish_cloned(),
- )
- } else {
- (
- buffer.pk_index_builder.finish(),
- buffer.ts_builder.to_vector(),
- buffer.sequence_builder.finish(),
- buffer.op_type_builder.finish(),
- )
- };
+ let (pk_index_v, ts_v, sequence_v, op_type_v) = (
+ buffer.pk_index_builder.finish(),
+ buffer.ts_builder.to_vector(),
+ buffer.sequence_builder.finish(),
+ buffer.op_type_builder.finish(),
+ );
+
+ let (indices_to_take, mut columns) = build_row_sort_indices_and_columns(
+ pk_weights,
+ pk_index_v,
+ ts_v,
+ sequence_v,
+ op_type_v,
+ replace_pk_index,
+ dedup,
+ buffer.field_builders.len() + 4,
+ )?;
+
+ for b in buffer.field_builders.iter_mut() {
+ let array = match b {
+ LazyMutableVectorBuilder::Type(ty) => {
+ let mut single_null = ty.create_mutable_vector(num_rows);
+ single_null.push_nulls(num_rows);
+ single_null.to_vector().to_arrow_array()
+ }
+ LazyMutableVectorBuilder::Builder(builder) => builder.to_vector().to_arrow_array(),
+ };
+ columns.push(
+ arrow::compute::take(&array, &indices_to_take, None)
+ .context(error::ComputeArrowSnafu)?,
+ );
+ }
- let mut rows = build_rows_to_sort(pk_weights, &pk_index_v, &ts_v, &sequence_v);
+ RecordBatch::try_new(schema, columns).context(error::NewRecordBatchSnafu)
+}
+
+/// Reads `DataBuffer` to record batches, with rows sorted according to pk_weights without resetting `DataBuffer`.
+/// `dedup`: whether to remove the duplicated rows inside `DataBuffer`.
+/// `replace_pk_index`: whether to replace the pk_index values with corresponding pk weight.
+fn read_data_buffer_to_record_batches(
+ schema: SchemaRef,
+ buffer: &DataBuffer,
+ pk_weights: Option<&[u16]>,
+ dedup: bool,
+ replace_pk_index: bool,
+) -> Result<RecordBatch> {
+ let num_rows = buffer.ts_builder.len();
+
+ let (pk_index_v, ts_v, sequence_v, op_type_v) = (
+ buffer.pk_index_builder.finish_cloned(),
+ buffer.ts_builder.to_vector_cloned(),
+ buffer.sequence_builder.finish_cloned(),
+ buffer.op_type_builder.finish_cloned(),
+ );
+
+ let (indices_to_take, mut columns) = build_row_sort_indices_and_columns(
+ pk_weights,
+ pk_index_v,
+ ts_v,
+ sequence_v,
+ op_type_v,
+ replace_pk_index,
+ dedup,
+ buffer.field_builders.len() + 4,
+ )?;
+
+ for b in buffer.field_builders.iter() {
+ let array = match b {
+ LazyMutableVectorBuilder::Type(ty) => {
+ let mut single_null = ty.create_mutable_vector(num_rows);
+ single_null.push_nulls(num_rows);
+ single_null.to_vector().to_arrow_array()
+ }
+ LazyMutableVectorBuilder::Builder(builder) => {
+ builder.to_vector_cloned().to_arrow_array()
+ }
+ };
+ columns.push(
+ arrow::compute::take(&array, &indices_to_take, None)
+ .context(error::ComputeArrowSnafu)?,
+ );
+ }
+
+ RecordBatch::try_new(schema, columns).context(error::NewRecordBatchSnafu)
+}
+
+#[allow(clippy::too_many_arguments)]
+fn build_row_sort_indices_and_columns(
+ pk_weights: Option<&[u16]>,
+ pk_index: UInt16Vector,
+ ts: VectorRef,
+ sequence: UInt64Vector,
+ op_type: UInt8Vector,
+ replace_pk_index: bool,
+ dedup: bool,
+ column_num: usize,
+) -> Result<(UInt32Array, Vec<ArrayRef>)> {
+ let mut rows = build_rows_to_sort(pk_weights, &pk_index, &ts, &sequence);
let pk_array = if replace_pk_index {
// replace pk index values with pk weights.
@@ -344,7 +414,7 @@ fn data_buffer_to_record_batches(
rows.iter().map(|(_, key)| key.pk_weight),
)) as Arc<_>
} else {
- pk_index_v.to_arrow_array()
+ pk_index.to_arrow_array()
};
// sort and dedup
@@ -355,7 +425,7 @@ fn data_buffer_to_record_batches(
let indices_to_take = UInt32Array::from_iter_values(rows.iter().map(|(idx, _)| *idx as u32));
- let mut columns = Vec::with_capacity(4 + buffer.field_builders.len());
+ let mut columns = Vec::with_capacity(column_num);
columns.push(
arrow::compute::take(&pk_array, &indices_to_take, None)
@@ -363,42 +433,21 @@ fn data_buffer_to_record_batches(
);
columns.push(
- arrow::compute::take(&ts_v.to_arrow_array(), &indices_to_take, None)
+ arrow::compute::take(&ts.to_arrow_array(), &indices_to_take, None)
.context(error::ComputeArrowSnafu)?,
);
columns.push(
- arrow::compute::take(&sequence_v.as_arrow(), &indices_to_take, None)
+ arrow::compute::take(&sequence.as_arrow(), &indices_to_take, None)
.context(error::ComputeArrowSnafu)?,
);
columns.push(
- arrow::compute::take(&op_type_v.as_arrow(), &indices_to_take, None)
+ arrow::compute::take(&op_type.as_arrow(), &indices_to_take, None)
.context(error::ComputeArrowSnafu)?,
);
- for b in buffer.field_builders.iter_mut() {
- let array = match b {
- LazyMutableVectorBuilder::Type(ty) => {
- let mut single_null = ty.create_mutable_vector(num_rows);
- single_null.push_nulls(num_rows);
- single_null.to_vector().to_arrow_array()
- }
- LazyMutableVectorBuilder::Builder(builder) => {
- if keep_data {
- builder.to_vector_cloned().to_arrow_array()
- } else {
- builder.to_vector().to_arrow_array()
- }
- }
- };
- columns.push(
- arrow::compute::take(&array, &indices_to_take, None)
- .context(error::ComputeArrowSnafu)?,
- );
- }
-
- RecordBatch::try_new(schema, columns).context(error::NewRecordBatchSnafu)
+ Ok((indices_to_take, columns))
}
pub(crate) fn timestamp_array_to_i64_slice(arr: &ArrayRef) -> &[i64] {
@@ -468,12 +517,6 @@ impl DataBufferReader {
}
}
- /// # Panics
- /// If Current reader is exhausted.
- pub(crate) fn current_pk_index(&self) -> PkIndex {
- self.current_range.as_ref().unwrap().pk_index
- }
-
/// Advances reader to next data batch.
pub(crate) fn next(&mut self) -> Result<()> {
if self.offset >= self.batch.num_rows() {
@@ -673,11 +716,10 @@ impl<'a> DataPartEncoder<'a> {
let mut bytes = Vec::with_capacity(1024);
let mut writer = ArrowWriter::try_new(&mut bytes, self.schema.clone(), self.writer_props())
.context(error::EncodeMemtableSnafu)?;
- let rb = data_buffer_to_record_batches(
+ let rb = drain_data_buffer_to_record_batches(
self.schema.clone(),
source,
self.pk_weights,
- false,
self.dedup,
self.replace_pk_index,
)?;
@@ -695,12 +737,6 @@ pub enum DataPart {
}
impl DataPart {
- fn is_empty(&self) -> bool {
- match self {
- DataPart::Parquet(p) => p.data.is_empty(),
- }
- }
-
/// Reads frozen data part and yields [DataBatch]es.
pub fn read(&self) -> Result<DataPartReader> {
match self {
@@ -745,14 +781,6 @@ impl DataPartReader {
self.current_range.is_some()
}
- /// Returns current pk index.
- ///
- /// # Panics
- /// If reader is exhausted.
- pub(crate) fn current_pk_index(&self) -> PkIndex {
- self.current_range.as_ref().unwrap().pk_index
- }
-
/// Returns current data batch of reader.
/// # Panics
/// If reader is exhausted.
@@ -835,16 +863,10 @@ impl DataParts {
self.active.write_row(pk_index, kv)
}
- /// Freezes the active data buffer into frozen data parts.
- pub fn freeze(&mut self) -> Result<()> {
- self.frozen.push(self.active.freeze(None, false)?);
- Ok(())
- }
-
/// Reads data from all parts including active and frozen parts.
/// The returned iterator yields a record batch of one primary key at a time.
/// The order of yielding primary keys is determined by provided weights.
- pub fn read(&mut self) -> Result<DataPartsReader> {
+ pub fn read(&self) -> Result<DataPartsReader> {
let mut nodes = Vec::with_capacity(self.frozen.len() + 1);
nodes.push(DataNode::new(DataSource::Buffer(
// `DataPars::read` ensures that all pk_index inside `DataBuffer` are replaced by weights.
@@ -857,10 +879,6 @@ impl DataParts {
let merger = Merger::try_new(nodes)?;
Ok(DataPartsReader { merger })
}
-
- pub(crate) fn is_empty(&self) -> bool {
- self.active.is_empty() && self.frozen.iter().all(|part| part.is_empty())
- }
}
/// Reader for all parts inside a `DataParts`.
@@ -924,15 +942,12 @@ mod tests {
write_rows_to_buffer(&mut buffer, &meta, 0, vec![2], vec![Some(1.1)], 3);
assert_eq!(5, buffer.num_rows());
let schema = memtable_schema_to_encoded_schema(&meta);
- let batch = data_buffer_to_record_batches(
- schema,
- &mut buffer,
- Some(&[3, 1]),
- keep_data,
- true,
- true,
- )
- .unwrap();
+ let batch = if keep_data {
+ read_data_buffer_to_record_batches(schema, &buffer, Some(&[3, 1]), true, true).unwrap()
+ } else {
+ drain_data_buffer_to_record_batches(schema, &mut buffer, Some(&[3, 1]), true, true)
+ .unwrap()
+ };
assert_eq!(
vec![1, 2, 1, 2],
@@ -1036,8 +1051,7 @@ mod tests {
assert_eq!(4, buffer.num_rows());
let schema = memtable_schema_to_encoded_schema(&meta);
let batch =
- data_buffer_to_record_batches(schema, &mut buffer, Some(&[0, 1]), true, true, true)
- .unwrap();
+ read_data_buffer_to_record_batches(schema, &buffer, Some(&[0, 1]), true, true).unwrap();
assert_eq!(3, batch.num_rows());
assert_eq!(
@@ -1090,9 +1104,8 @@ mod tests {
write_rows_to_buffer(&mut buffer, &meta, 0, vec![2], vec![Some(1.1)], 3);
assert_eq!(5, buffer.num_rows());
let schema = memtable_schema_to_encoded_schema(&meta);
- let batch =
- data_buffer_to_record_batches(schema, &mut buffer, Some(&[3, 1]), true, false, true)
- .unwrap();
+ let batch = read_data_buffer_to_record_batches(schema, &buffer, Some(&[3, 1]), false, true)
+ .unwrap();
assert_eq!(
vec![1, 1, 3, 3, 3],
@@ -1289,7 +1302,7 @@ mod tests {
#[test]
fn test_iter_empty_data_buffer() {
let meta = metadata_for_test();
- let mut buffer = DataBuffer::with_capacity(meta.clone(), 10, true);
+ let buffer = DataBuffer::with_capacity(meta.clone(), 10, true);
let mut iter = buffer.read(Some(&[0, 1, 3, 2])).unwrap();
check_buffer_values_equal(&mut iter, &[]);
}
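Aside: the read/drain split above hinges on the vector builders offering both a draining `finish()` and a non-destructive `finish_cloned()`. Below is a minimal illustration of that pattern using the arrow-rs builders the crate's own vector builders are built on (an assumption for illustration only, not code from this change):

use arrow::array::{Array, ArrayBuilder, UInt16Builder};

fn main() {
    let mut builder = UInt16Builder::new();
    builder.append_value(1);
    builder.append_value(2);

    // `finish_cloned` snapshots the buffered values and leaves the builder intact,
    // which is what lets `read_data_buffer_to_record_batches` take `&DataBuffer`.
    let snapshot = builder.finish_cloned();
    assert_eq!(snapshot.len(), 2);
    assert_eq!(builder.len(), 2);

    // `finish` drains and resets the builder, matching `drain_data_buffer_to_record_batches`.
    let drained = builder.finish();
    assert_eq!(drained.len(), 2);
    assert_eq!(builder.len(), 0);
}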
diff --git a/src/mito2/src/memtable/merge_tree/dedup.rs b/src/mito2/src/memtable/merge_tree/dedup.rs
index b5155d31d5e2..a955e3b33d1c 100644
--- a/src/mito2/src/memtable/merge_tree/dedup.rs
+++ b/src/mito2/src/memtable/merge_tree/dedup.rs
@@ -176,7 +176,7 @@ mod tests {
frozens.push(part1);
}
- let mut parts = DataParts::new(meta, 10, true).with_frozen(frozens);
+ let parts = DataParts::new(meta, 10, true).with_frozen(frozens);
let mut res = Vec::with_capacity(expected.len());
let mut reader = DedupReader::try_new(MockSource(parts.read().unwrap())).unwrap();
diff --git a/src/mito2/src/memtable/merge_tree/dict.rs b/src/mito2/src/memtable/merge_tree/dict.rs
index 543dca3d1c8e..989a59d007ac 100644
--- a/src/mito2/src/memtable/merge_tree/dict.rs
+++ b/src/mito2/src/memtable/merge_tree/dict.rs
@@ -56,11 +56,6 @@ impl KeyDictBuilder {
}
}
- /// Gets the pk index by the key.
- pub fn get_pk_index(&self, key: &[u8]) -> Option<PkIndex> {
- self.pk_to_index.get(key).copied()
- }
-
/// Returns true if the builder is full.
pub fn is_full(&self) -> bool {
self.num_keys >= self.capacity
@@ -97,6 +92,7 @@ impl KeyDictBuilder {
}
/// Memory size of the builder.
+ #[cfg(test)]
pub fn memory_size(&self) -> usize {
self.key_bytes_in_index
+ self.key_buffer.buffer_memory_size()
@@ -152,8 +148,6 @@ impl KeyDictBuilder {
pub struct DictBuilderReader {
blocks: Vec<DictBlock>,
sorted_pk_indices: Vec<PkIndex>,
- /// Current offset in the `sorted_pk_indices`.
- offset: usize,
}
impl DictBuilderReader {
@@ -161,21 +155,23 @@ impl DictBuilderReader {
Self {
blocks,
sorted_pk_indices,
- offset: 0,
}
}
/// Returns the number of keys.
+ #[cfg(test)]
pub fn num_keys(&self) -> usize {
self.sorted_pk_indices.len()
}
/// Gets the i-th pk index.
+ #[cfg(test)]
pub fn pk_index(&self, offset: usize) -> PkIndex {
self.sorted_pk_indices[offset]
}
/// Gets the i-th key.
+ #[cfg(test)]
pub fn key(&self, offset: usize) -> &[u8] {
let pk_index = self.pk_index(offset);
self.key_by_pk_index(pk_index)
@@ -191,11 +187,6 @@ impl DictBuilderReader {
pub(crate) fn pk_weights_to_sort_data(&self, pk_weights: &mut Vec<u16>) {
compute_pk_weights(&self.sorted_pk_indices, pk_weights)
}
-
- /// Returns pk indices sorted by keys.
- pub(crate) fn sorted_pk_index(&self) -> &[PkIndex] {
- &self.sorted_pk_indices
- }
}
/// Returns pk weights to sort a data part and replaces pk indices.
@@ -290,23 +281,8 @@ impl KeyBuffer {
self.key_builder.is_empty()
}
- /// Gets the primary key by its index.
- ///
- /// # Panics
- /// Panics if the index is invalid.
- fn get_key(&self, index: PkIndex) -> &[u8] {
- let values = self.key_builder.values_slice();
- let offsets = self.key_builder.offsets_slice();
- // Casting index to usize is safe.
- let start = offsets[index as usize];
- let end = offsets[index as usize + 1];
-
- // We ensure no null in the builder so we don't check validity.
- // The builder offset should be positive.
- &values[start as usize..end as usize]
- }
-
/// Returns the buffer size of the builder.
+ #[cfg(test)]
fn buffer_memory_size(&self) -> usize {
self.key_builder.values_slice().len()
+ std::mem::size_of_val(self.key_builder.offsets_slice())
@@ -351,15 +327,12 @@ impl DictBlock {
Self { keys }
}
- fn len(&self) -> usize {
- self.keys.len()
- }
-
fn key_by_pk_index(&self, index: PkIndex) -> &[u8] {
let pos = index % MAX_KEYS_PER_BLOCK;
self.keys.value(pos as usize)
}
+ #[cfg(test)]
fn buffer_memory_size(&self) -> usize {
self.keys.get_buffer_memory_size()
}
diff --git a/src/mito2/src/memtable/merge_tree/partition.rs b/src/mito2/src/memtable/merge_tree/partition.rs
index 6d124b3cfc09..d58c26682654 100644
--- a/src/mito2/src/memtable/merge_tree/partition.rs
+++ b/src/mito2/src/memtable/merge_tree/partition.rs
@@ -105,13 +105,12 @@ impl Partition {
/// Scans data in the partition.
pub fn read(&self, mut context: ReadPartitionContext) -> Result<PartitionReader> {
- // TODO(yingwen): Change to acquire read lock if `read()` takes `&self`.
let nodes = {
- let mut inner = self.inner.write().unwrap();
+ let inner = self.inner.read().unwrap();
let mut nodes = Vec::with_capacity(inner.shards.len() + 1);
let bulder_reader = inner.shard_builder.read(&mut context.pk_weights)?;
nodes.push(ShardNode::new(ShardSource::Builder(bulder_reader)));
- for shard in &mut inner.shards {
+ for shard in &inner.shards {
let shard_reader = shard.read()?;
nodes.push(ShardNode::new(ShardSource::Shard(shard_reader)));
}
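Aside: with `read()` taking `&self` all the way down, `Partition::read` can hold the `RwLock` read guard instead of the write guard, so concurrent scans no longer serialize behind exclusive access. A minimal sketch of the locking behavior this relies on (standard library `RwLock`, placeholder data):

use std::sync::RwLock;

fn main() {
    let inner = RwLock::new(vec![1, 2, 3]);

    // Any number of read guards may coexist, so concurrent scans do not block each other.
    let a = inner.read().unwrap();
    let b = inner.read().unwrap();
    assert_eq!(a.len() + b.len(), 6);
    drop((a, b));

    // A write guard is exclusive and is only needed for mutation.
    inner.write().unwrap().push(4);
}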
diff --git a/src/mito2/src/memtable/merge_tree/shard.rs b/src/mito2/src/memtable/merge_tree/shard.rs
index 8b83b2ad1e61..e0dcad9989f7 100644
--- a/src/mito2/src/memtable/merge_tree/shard.rs
+++ b/src/mito2/src/memtable/merge_tree/shard.rs
@@ -72,7 +72,7 @@ impl Shard {
/// Scans the shard.
// TODO(yingwen): Push down projection to data parts.
- pub fn read(&mut self) -> Result<ShardReader> {
+ pub fn read(&self) -> Result<ShardReader> {
let parts_reader = self.data_parts.read()?;
Ok(ShardReader {
@@ -136,10 +136,6 @@ pub struct ShardReader {
}
impl ShardReader {
- fn shard_id(&self) -> ShardId {
- self.shard_id
- }
-
fn is_valid(&self) -> bool {
self.parts_reader.is_valid()
}
diff --git a/src/mito2/src/memtable/merge_tree/shard_builder.rs b/src/mito2/src/memtable/merge_tree/shard_builder.rs
index d48310409b40..c63a1b9f261c 100644
--- a/src/mito2/src/memtable/merge_tree/shard_builder.rs
+++ b/src/mito2/src/memtable/merge_tree/shard_builder.rs
@@ -106,7 +106,7 @@ impl ShardBuilder {
}
/// Scans the shard builder.
- pub fn read(&mut self, pk_weights_buffer: &mut Vec<u16>) -> Result<ShardBuilderReader> {
+ pub fn read(&self, pk_weights_buffer: &mut Vec<u16>) -> Result<ShardBuilderReader> {
let dict_reader = self.dict_builder.read();
dict_reader.pk_weights_to_sort_data(pk_weights_buffer);
let data_reader = self.data_buffer.read(Some(pk_weights_buffer))?;
@@ -132,10 +132,6 @@ pub struct ShardBuilderReader {
}
impl ShardBuilderReader {
- pub fn shard_id(&self) -> ShardId {
- self.shard_id
- }
-
pub fn is_valid(&self) -> bool {
self.data_reader.is_valid()
}
@@ -164,14 +160,12 @@ impl ShardBuilderReader {
#[cfg(test)]
mod tests {
- use std::sync::Arc;
use super::*;
- use crate::memtable::merge_tree::dict::KeyDictBuilder;
use crate::memtable::merge_tree::metrics::WriteMetrics;
use crate::memtable::KeyValues;
use crate::test_util::memtable_util::{
- build_key_values_with_ts_seq_values, encode_key_by_kv, encode_keys, metadata_for_test,
+ build_key_values_with_ts_seq_values, encode_key_by_kv, metadata_for_test,
};
fn input_with_key(metadata: &RegionMetadataRef) -> Vec<KeyValues> {
@@ -203,27 +197,6 @@ mod tests {
]
}
- fn new_shard_builder(
- shard_id: ShardId,
- metadata: RegionMetadataRef,
- input: &[KeyValues],
- ) -> Shard {
- let mut dict_builder = KeyDictBuilder::new(1024);
- let mut metrics = WriteMetrics::default();
- let mut keys = Vec::with_capacity(input.len());
- for kvs in input {
- encode_keys(&metadata, kvs, &mut keys);
- }
- for key in &keys {
- dict_builder.insert_key(key, &mut metrics);
- }
-
- let dict = dict_builder.finish().unwrap();
- let data_parts = DataParts::new(metadata, DATA_INIT_CAP, true);
-
- Shard::new(shard_id, Some(Arc::new(dict)), data_parts, true)
- }
-
#[test]
fn test_write_shard_builder() {
let metadata = metadata_for_test();
diff --git a/src/mito2/src/memtable/merge_tree/tree.rs b/src/mito2/src/memtable/merge_tree/tree.rs
index afa79463e591..7e19a6945432 100644
--- a/src/mito2/src/memtable/merge_tree/tree.rs
+++ b/src/mito2/src/memtable/merge_tree/tree.rs
@@ -21,8 +21,6 @@ use api::v1::OpType;
use common_recordbatch::filter::SimpleFilterEvaluator;
use common_time::Timestamp;
use datafusion_common::ScalarValue;
-use datatypes::arrow;
-use datatypes::data_type::ConcreteDataType;
use snafu::ensure;
use store_api::metadata::RegionMetadataRef;
use store_api::storage::ColumnId;
@@ -35,7 +33,6 @@ use crate::memtable::merge_tree::partition::{
Partition, PartitionKey, PartitionReader, PartitionRef, ReadPartitionContext,
};
use crate::memtable::merge_tree::MergeTreeConfig;
-use crate::memtable::time_series::primary_key_schema;
use crate::memtable::{BoxedBatchIterator, KeyValues};
use crate::read::Batch;
use crate::row_converter::{McmpRowCodec, RowCodec, SortField};
@@ -144,18 +141,8 @@ impl MergeTree {
.unwrap_or_default();
let partitions = self.prune_partitions(&filters);
- let pk_schema = primary_key_schema(&self.metadata);
- let pk_datatypes = self
- .metadata
- .primary_key_columns()
- .map(|pk| pk.column_schema.data_type.clone())
- .collect();
let mut iter = TreeIter {
- metadata: self.metadata.clone(),
- pk_schema,
- pk_datatypes,
- row_codec: self.row_codec.clone(),
partitions,
current_reader: None,
};
@@ -283,10 +270,6 @@ impl MergeTree {
}
struct TreeIter {
- metadata: RegionMetadataRef,
- pk_schema: arrow::datatypes::SchemaRef,
- pk_datatypes: Vec<ConcreteDataType>,
- row_codec: Arc<McmpRowCodec>,
partitions: VecDeque<PartitionRef>,
current_reader: Option<PartitionReader>,
}
|
refactor
|
change the receivers of merge tree components (#3378)
|
c07a1babd5a6cb6b35c5da3efc72dc597c7a6e42
|
2024-05-08 11:43:33
|
Lei, HUANG
|
refactor(logstore): remove Entry::namemspace (#3875)
| false
|
diff --git a/src/log-store/src/kafka.rs b/src/log-store/src/kafka.rs
index d80a19d5c38f..07a21596cbf6 100644
--- a/src/log-store/src/kafka.rs
+++ b/src/log-store/src/kafka.rs
@@ -57,7 +57,6 @@ pub struct EntryImpl {
impl Entry for EntryImpl {
type Error = Error;
- type Namespace = NamespaceImpl;
fn data(&self) -> &[u8] {
&self.data
@@ -67,10 +66,6 @@ impl Entry for EntryImpl {
self.id
}
- fn namespace(&self) -> Self::Namespace {
- self.ns.clone()
- }
-
fn estimated_size(&self) -> usize {
size_of::<Self>() + self.data.capacity() * size_of::<u8>() + self.ns.topic.capacity()
}
diff --git a/src/log-store/src/noop.rs b/src/log-store/src/noop.rs
index 1d28e67dce16..ded005ec7981 100644
--- a/src/log-store/src/noop.rs
+++ b/src/log-store/src/noop.rs
@@ -37,7 +37,6 @@ impl Namespace for NamespaceImpl {
impl Entry for EntryImpl {
type Error = Error;
- type Namespace = NamespaceImpl;
fn data(&self) -> &[u8] {
&[]
@@ -47,10 +46,6 @@ impl Entry for EntryImpl {
0
}
- fn namespace(&self) -> Self::Namespace {
- Default::default()
- }
-
fn estimated_size(&self) -> usize {
0
}
diff --git a/src/log-store/src/raft_engine.rs b/src/log-store/src/raft_engine.rs
index 49082acab041..e7a6f6b0ca16 100644
--- a/src/log-store/src/raft_engine.rs
+++ b/src/log-store/src/raft_engine.rs
@@ -68,7 +68,6 @@ impl Namespace for NamespaceImpl {
impl Entry for EntryImpl {
type Error = Error;
- type Namespace = NamespaceImpl;
fn data(&self) -> &[u8] {
self.data.as_slice()
@@ -78,31 +77,20 @@ impl Entry for EntryImpl {
self.id
}
- fn namespace(&self) -> Self::Namespace {
- NamespaceImpl {
- id: self.namespace_id,
- ..Default::default()
- }
- }
-
fn estimated_size(&self) -> usize {
- size_of::<Self>() + self.data.capacity() * size_of::<u8>()
+ self.data.len() + size_of::<u64>() + size_of::<u64>()
}
}
#[cfg(test)]
mod tests {
- use std::mem::size_of;
-
use store_api::logstore::entry::Entry;
use crate::raft_engine::protos::logstore::EntryImpl;
#[test]
fn test_estimated_size() {
- let entry = EntryImpl::create(1, 1, Vec::with_capacity(100));
- let expected = size_of::<EntryImpl>() + 100;
- let got = entry.estimated_size();
- assert_eq!(expected, got);
+ let entry = EntryImpl::create(1, 1, b"hello, world".to_vec());
+ assert_eq!(28, entry.estimated_size());
}
}
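Aside: the new estimate counts just the payload length plus two fixed-width u64 fields (presumably the entry id and the namespace id), which is where the 28 in the updated test comes from:

fn main() {
    // "hello, world" is 12 bytes; each u64 field adds 8 bytes.
    let estimated = "hello, world".len() + std::mem::size_of::<u64>() * 2;
    assert_eq!(estimated, 28); // 12 + 8 + 8
}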
diff --git a/src/store-api/src/logstore/entry.rs b/src/store-api/src/logstore/entry.rs
index 671f55ac35a2..daac2df4c9af 100644
--- a/src/store-api/src/logstore/entry.rs
+++ b/src/store-api/src/logstore/entry.rs
@@ -14,8 +14,6 @@
use common_error::ext::ErrorExt;
-use crate::logstore::namespace::Namespace;
-
/// An entry's id.
/// Different log store implementations may interpret the id to different meanings.
pub type Id = u64;
@@ -24,7 +22,6 @@ pub type Id = u64;
/// The log store implementation may have larger or smaller data storage unit than an entry.
pub trait Entry: Send + Sync {
type Error: ErrorExt + Send + Sync;
- type Namespace: Namespace;
/// Returns the contained data of the entry.
fn data(&self) -> &[u8];
@@ -33,9 +30,6 @@ pub trait Entry: Send + Sync {
/// Usually the namespace id is identical with the region id.
fn id(&self) -> Id;
- /// Returns the namespace of the entry.
- fn namespace(&self) -> Self::Namespace;
-
- /// Computes the estimated size in bytes of the entry.
+ /// Computes the estimated encoded size.
fn estimated_size(&self) -> usize;
}
diff --git a/src/store-api/src/logstore/entry_stream.rs b/src/store-api/src/logstore/entry_stream.rs
index 23d131e451aa..5f26133ada8a 100644
--- a/src/store-api/src/logstore/entry_stream.rs
+++ b/src/store-api/src/logstore/entry_stream.rs
@@ -31,7 +31,6 @@ pub type SendableEntryStream<'a, I, E> = Pin<Box<dyn Stream<Item = Result<Vec<I>
#[cfg(test)]
mod tests {
use std::any::Any;
- use std::mem::size_of;
use std::task::{Context, Poll};
use common_error::ext::StackError;
@@ -50,15 +49,6 @@ mod tests {
#[snafu(visibility(pub))]
pub struct Error {}
- #[derive(Debug, Clone, Eq, PartialEq, Hash)]
- pub struct Namespace {}
-
- impl crate::logstore::Namespace for Namespace {
- fn id(&self) -> crate::logstore::namespace::Id {
- 0
- }
- }
-
impl ErrorExt for Error {
fn as_any(&self) -> &dyn Any {
self
@@ -75,7 +65,6 @@ mod tests {
impl Entry for SimpleEntry {
type Error = Error;
- type Namespace = Namespace;
fn data(&self) -> &[u8] {
&self.data
@@ -85,12 +74,8 @@ mod tests {
0u64
}
- fn namespace(&self) -> Self::Namespace {
- Namespace {}
- }
-
fn estimated_size(&self) -> usize {
- self.data.capacity() * size_of::<u8>()
+ self.data.len()
}
}
|
refactor
|
remove Entry::namemspace (#3875)
|
777a3182c52224f4a4bc8a309c9aa9a407c998e1
|
2023-01-09 09:07:43
|
Xuanwo
|
feat: Bump OpenDAL to 0.24 for better seekable support (#847)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 191ae2dd0fea..529d3a1573e4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4393,9 +4393,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
[[package]]
name = "opendal"
-version = "0.22.5"
+version = "0.24.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0d1ff77f4919836ec2002b7b42366722b2856c2b718102d3d1cd58db5e56e3e"
+checksum = "97541724cf371973b28f5a873404f2a2a4f7bb1efe7ca36a27836c13958781c2"
dependencies = [
"anyhow",
"async-compat",
@@ -5401,9 +5401,9 @@ dependencies = [
[[package]]
name = "quick-xml"
-version = "0.26.0"
+version = "0.27.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f50b1c63b38611e7d4d7f68b82d3ad0cc71a2ad2e7f61fc10f1328d917c93cd"
+checksum = "ffc053f057dd768a56f62cd7e434c42c831d296968997e9ac1f76ea7c2d14c41"
dependencies = [
"memchr",
"serde",
@@ -5664,9 +5664,9 @@ dependencies = [
[[package]]
name = "reqsign"
-version = "0.7.1"
+version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64e209415378d7a5e169615faee53d9961ee1f1046d9d00991045a6a2de9f3f6"
+checksum = "1c97ac0f771c78ddf4bcb73c8454c76565a7249780e7296767f7e89661b0e045"
dependencies = [
"anyhow",
"backon",
diff --git a/src/object-store/Cargo.toml b/src/object-store/Cargo.toml
index 17ea74510380..0c7ce0026311 100644
--- a/src/object-store/Cargo.toml
+++ b/src/object-store/Cargo.toml
@@ -6,7 +6,7 @@ license.workspace = true
[dependencies]
futures = { version = "0.3" }
-opendal = { version = "0.22", features = ["layers-tracing", "layers-metrics"] }
+opendal = { version = "0.24", features = ["layers-tracing", "layers-metrics"] }
tokio.workspace = true
[dev-dependencies]
diff --git a/src/object-store/src/lib.rs b/src/object-store/src/lib.rs
index 9ac89f0c7ca2..1079a9f4d1db 100644
--- a/src/object-store/src/lib.rs
+++ b/src/object-store/src/lib.rs
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-pub use opendal::raw::SeekableReader;
pub use opendal::{
layers, services, Error, ErrorKind, Layer, Object, ObjectLister, ObjectMetadata, ObjectMode,
Operator as ObjectStore, Result,
diff --git a/src/storage/src/sst/parquet.rs b/src/storage/src/sst/parquet.rs
index d48306d97fdd..8055a7c15621 100644
--- a/src/storage/src/sst/parquet.rs
+++ b/src/storage/src/sst/parquet.rs
@@ -33,7 +33,8 @@ use table::predicate::Predicate;
use tokio::io::BufReader;
use crate::error::{
- self, NewRecordBatchSnafu, ReadParquetSnafu, Result, WriteObjectSnafu, WriteParquetSnafu,
+ self, NewRecordBatchSnafu, ReadObjectSnafu, ReadParquetSnafu, Result, WriteObjectSnafu,
+ WriteParquetSnafu,
};
use crate::memtable::BoxedBatchIterator;
use crate::read::{Batch, BatchReader};
@@ -140,7 +141,14 @@ impl<'a> ParquetReader<'a> {
pub async fn chunk_stream(&self) -> Result<ChunkStream> {
let operator = self.object_store.clone();
- let reader = operator.object(self.file_path).seekable_reader(..).compat();
+ let reader = operator
+ .object(self.file_path)
+ .reader()
+ .await
+ .context(ReadObjectSnafu {
+ path: self.file_path,
+ })?
+ .compat();
let buf_reader = BufReader::new(reader);
let builder = ParquetRecordBatchStreamBuilder::new(buf_reader)
.await
@@ -273,7 +281,9 @@ mod tests {
let reader = BufReader::new(
object_store
.object(sst_file_name)
- .seekable_reader(..)
+ .reader()
+ .await
+ .unwrap()
.compat(),
);
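Aside: the call-site churn above comes from an API shape change between OpenDAL versions, sketched below with a placeholder `store` operator and path (hypothetical before/after, not code from this change):

// opendal 0.22: building a reader was synchronous and infallible.
//   let reader = store.object("file.parquet").seekable_reader(..);
//
// opendal 0.24: `reader()` is async and returns a Result, so call sites gain
// `.await` plus error context (hence the new `ReadObjectSnafu` usage above)
// before wrapping the reader for tokio with `.compat()`.
//   let reader = store.object("file.parquet").reader().await?.compat();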
|
feat
|
Bump OpenDAL to 0.24 for better seekable support (#847)
|
ae8153515bd54a5e347dadc9fbf1aca1f2678bf4
|
2023-11-29 16:40:38
|
Weny Xu
|
feat: add update metadata step for upgrading candidate region (#2811)
| false
|
diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs
index 39a942d5f3bd..850051fa0b93 100644
--- a/src/common/meta/src/key.rs
+++ b/src/common/meta/src/key.rs
@@ -584,7 +584,7 @@ impl TableMetadataManager {
&self,
table_id: TableId,
region_info: RegionInfo,
- current_table_route_value: DeserializedValueWithBytes<TableRouteValue>,
+ current_table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
new_region_routes: Vec<RegionRoute>,
new_region_options: &HashMap<String, String>,
) -> Result<()> {
@@ -606,7 +606,7 @@ impl TableMetadataManager {
let (update_table_route_txn, on_update_table_route_failure) = self
.table_route_manager()
- .build_update_txn(table_id, &current_table_route_value, &new_table_route_value)?;
+ .build_update_txn(table_id, current_table_route_value, &new_table_route_value)?;
let txn = Txn::merge_all(vec![update_datanode_table_txn, update_table_route_txn]);
@@ -1173,7 +1173,7 @@ mod tests {
region_storage_path: region_storage_path.to_string(),
region_options: HashMap::new(),
},
- current_table_route_value.clone(),
+ &current_table_route_value,
new_region_routes.clone(),
&HashMap::new(),
)
@@ -1190,7 +1190,7 @@ mod tests {
region_storage_path: region_storage_path.to_string(),
region_options: HashMap::new(),
},
- current_table_route_value.clone(),
+ &current_table_route_value,
new_region_routes.clone(),
&HashMap::new(),
)
@@ -1212,7 +1212,7 @@ mod tests {
region_storage_path: region_storage_path.to_string(),
region_options: HashMap::new(),
},
- current_table_route_value.clone(),
+ &current_table_route_value,
new_region_routes.clone(),
&HashMap::new(),
)
@@ -1237,7 +1237,7 @@ mod tests {
region_storage_path: region_storage_path.to_string(),
region_options: HashMap::new(),
},
- wrong_table_route_value,
+ &wrong_table_route_value,
new_region_routes,
&HashMap::new(),
)
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index 3a007b3163c4..abd60a4947cf 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -298,6 +298,12 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Failed to find table route for {region_id}"))]
+ RegionRouteNotFound {
+ region_id: RegionId,
+ location: Location,
+ },
+
#[snafu(display("Table info not found: {}", table_id))]
TableInfoNotFound {
table_id: TableId,
@@ -658,7 +664,8 @@ impl ErrorExt for Error {
| Error::Unexpected { .. }
| Error::Txn { .. }
| Error::TableIdChanged { .. }
- | Error::RegionOpeningRace { .. } => StatusCode::Unexpected,
+ | Error::RegionOpeningRace { .. }
+ | Error::RegionRouteNotFound { .. } => StatusCode::Unexpected,
Error::TableNotFound { .. } => StatusCode::TableNotFound,
Error::InvalidateTableCache { source, .. } => source.status_code(),
Error::RequestDatanode { source, .. } => source.status_code(),
diff --git a/src/meta-srv/src/lib.rs b/src/meta-srv/src/lib.rs
index b30c6779b36a..6515ade81ebe 100644
--- a/src/meta-srv/src/lib.rs
+++ b/src/meta-srv/src/lib.rs
@@ -15,6 +15,8 @@
#![feature(async_closure)]
#![feature(result_flattening)]
#![feature(assert_matches)]
+#![feature(option_take_if)]
+#![feature(extract_if)]
pub mod bootstrap;
mod cache_invalidator;
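Aside: these two feature gates are needed by the upgrade step added later in this change, which calls `Option::take_if` on the leader peer and `Vec::extract_if` on the follower list. A minimal nightly sketch of their behavior (placeholder values):

#![feature(option_take_if)]
#![feature(extract_if)]

fn main() {
    // `take_if` moves the value out only when the predicate holds, leaving `None` behind.
    let mut leader = Some(1u64);
    assert_eq!(leader.take_if(|id| *id == 1), Some(1));
    assert_eq!(leader, None);

    // `extract_if` removes and yields the matching elements as the iterator is consumed.
    let mut followers = vec![1u64, 2, 2, 3];
    let removed: Vec<_> = followers.extract_if(|id| *id == 2).collect();
    assert_eq!(removed, vec![2, 2]);
    assert_eq!(followers, vec![1, 3]);
}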
diff --git a/src/meta-srv/src/procedure/region_failover/update_metadata.rs b/src/meta-srv/src/procedure/region_failover/update_metadata.rs
index 22c1a89b542e..225588ac0942 100644
--- a/src/meta-srv/src/procedure/region_failover/update_metadata.rs
+++ b/src/meta-srv/src/procedure/region_failover/update_metadata.rs
@@ -105,7 +105,7 @@ impl UpdateRegionMetadata {
region_storage_path: self.region_storage_path.to_string(),
region_options: self.region_options.clone(),
},
- table_route_value,
+ &table_route_value,
new_region_routes,
&self.region_options,
)
diff --git a/src/meta-srv/src/procedure/region_migration.rs b/src/meta-srv/src/procedure/region_migration.rs
index 295aab78acbd..d5ae7235c79d 100644
--- a/src/meta-srv/src/procedure/region_migration.rs
+++ b/src/meta-srv/src/procedure/region_migration.rs
@@ -25,6 +25,7 @@ use std::any::Any;
use std::fmt::Debug;
use std::time::Duration;
+use common_meta::key::table_info::TableInfoValue;
use common_meta::key::table_route::TableRouteValue;
use common_meta::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
use common_meta::peer::Peer;
@@ -81,8 +82,13 @@ pub struct VolatileContext {
/// the corresponding [RegionRoute](common_meta::rpc::router::RegionRoute) of the opening region
/// was written into [TableRouteValue](common_meta::key::table_route::TableRouteValue).
opening_region_guard: Option<OpeningRegionGuard>,
- /// `table_route_info` is stored via previous steps for future use.
- table_route_info: Option<DeserializedValueWithBytes<TableRouteValue>>,
+ /// `table_route` is stored via previous steps for future use.
+ table_route: Option<DeserializedValueWithBytes<TableRouteValue>>,
+ /// `table_info` is stored via previous steps for future use.
+ ///
+ /// `table_info` should remain unchanged during the procedure;
+ /// no other DDL procedure executed concurrently for the current table.
+ table_info: Option<DeserializedValueWithBytes<TableInfoValue>>,
/// The deadline of leader region lease.
leader_region_lease_deadline: Option<Instant>,
/// The last_entry_id of leader region.
@@ -153,7 +159,7 @@ impl Context {
&self.server_addr
}
- /// Returns the `table_route_value` of [VolatileContext] if any.
+ /// Returns the `table_route` of [VolatileContext] if any.
/// Otherwise, returns the value retrieved from remote.
///
/// Retry:
@@ -161,7 +167,7 @@ impl Context {
pub async fn get_table_route_value(
&mut self,
) -> Result<&DeserializedValueWithBytes<TableRouteValue>> {
- let table_route_value = &mut self.volatile_ctx.table_route_info;
+ let table_route_value = &mut self.volatile_ctx.table_route;
if table_route_value.is_none() {
let table_id = self.persistent_ctx.region_id.table_id();
@@ -183,9 +189,45 @@ impl Context {
Ok(table_route_value.as_ref().unwrap())
}
- /// Removes the `table_route_value` of [VolatileContext], returns true if any.
+ /// Removes the `table_route` of [VolatileContext], returns true if any.
pub fn remove_table_route_value(&mut self) -> bool {
- let value = self.volatile_ctx.table_route_info.take();
+ let value = self.volatile_ctx.table_route.take();
+ value.is_some()
+ }
+
+ /// Returns the `table_info` of [VolatileContext] if any.
+ /// Otherwise, returns the value retrieved from remote.
+ ///
+ /// Retry:
+ /// - Failed to retrieve the metadata of table.
+ pub async fn get_table_info_value(
+ &mut self,
+ ) -> Result<&DeserializedValueWithBytes<TableInfoValue>> {
+ let table_info_value = &mut self.volatile_ctx.table_info;
+
+ if table_info_value.is_none() {
+ let table_id = self.persistent_ctx.region_id.table_id();
+ let table_info = self
+ .table_metadata_manager
+ .table_info_manager()
+ .get(table_id)
+ .await
+ .context(error::TableMetadataManagerSnafu)
+ .map_err(|e| error::Error::RetryLater {
+ reason: e.to_string(),
+ location: location!(),
+ })?
+ .context(error::TableInfoNotFoundSnafu { table_id })?;
+
+ *table_info_value = Some(table_info);
+ }
+
+ Ok(table_info_value.as_ref().unwrap())
+ }
+
+ /// Removes the `table_info` of [VolatileContext], returns true if any.
+ pub fn remove_table_info_value(&mut self) -> bool {
+ let value = self.volatile_ctx.table_info.take();
value.is_some()
}
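Aside: `get_table_info_value` follows the same cache-or-fetch shape as `get_table_route_value`: fetch once, keep the value in the volatile context, and let a later step drop it when it may be stale. A minimal sketch of that shape (the `fetch_from_remote` future is a hypothetical stand-in for the table metadata manager lookup):

struct VolatileCache {
    table_info: Option<String>,
}

impl VolatileCache {
    async fn get_or_fetch<F>(&mut self, fetch_from_remote: F) -> &String
    where
        F: std::future::Future<Output = String>,
    {
        if self.table_info.is_none() {
            // Only the first call hits the remote store; later calls reuse the cached value.
            self.table_info = Some(fetch_from_remote.await);
        }
        self.table_info.as_ref().unwrap()
    }

    // Drops the cached value (e.g. after a failed update), returning true if one was present.
    fn invalidate(&mut self) -> bool {
        self.table_info.take().is_some()
    }
}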
diff --git a/src/meta-srv/src/procedure/region_migration/migration_start.rs b/src/meta-srv/src/procedure/region_migration/migration_start.rs
index 47eb0abb980f..b10a26886aec 100644
--- a/src/meta-srv/src/procedure/region_migration/migration_start.rs
+++ b/src/meta-srv/src/procedure/region_migration/migration_start.rs
@@ -137,16 +137,11 @@ mod tests {
use super::*;
use crate::error::Error;
- use crate::procedure::region_migration::test_util::TestingEnv;
+ use crate::procedure::region_migration::test_util::{self, TestingEnv};
use crate::procedure::region_migration::{ContextFactory, PersistentContext};
fn new_persistent_context() -> PersistentContext {
- PersistentContext {
- from_peer: Peer::empty(1),
- to_peer: Peer::empty(2),
- region_id: RegionId::new(1024, 1),
- cluster_id: 0,
- }
+ test_util::new_persistent_context(1, 2, RegionId::new(1024, 1))
}
#[tokio::test]
diff --git a/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs
index fc4d9c7d9a45..2e126625347f 100644
--- a/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs
@@ -21,7 +21,7 @@ use common_meta::ddl::utils::region_storage_path;
use common_meta::instruction::{Instruction, InstructionReply, OpenRegion, SimpleReply};
use common_meta::RegionIdent;
use serde::{Deserialize, Serialize};
-use snafu::{location, Location, OptionExt, ResultExt};
+use snafu::{OptionExt, ResultExt};
use crate::error::{self, Result};
use crate::handler::HeartbeatMailbox;
@@ -54,38 +54,28 @@ impl OpenCandidateRegion {
///
/// Abort(non-retry):
/// - Table Info is not found.
- async fn build_open_region_instruction(&self, ctx: &Context) -> Result<Instruction> {
+ async fn build_open_region_instruction(&self, ctx: &mut Context) -> Result<Instruction> {
let pc = &ctx.persistent_ctx;
let cluster_id = pc.cluster_id;
let table_id = pc.region_id.table_id();
let region_number = pc.region_id.region_number();
- let candidate = &pc.to_peer;
- let table_info = ctx
- .table_metadata_manager
- .table_info_manager()
- .get(table_id)
- .await
- .context(error::TableMetadataManagerSnafu)
- .map_err(|e| error::Error::RetryLater {
- reason: e.to_string(),
- location: location!(),
- })?
- .context(error::TableInfoNotFoundSnafu { table_id })?
- .into_inner()
- .table_info;
+ let candidate_id = pc.to_peer.id;
+
+ let table_info_value = ctx.get_table_info_value().await?;
+ let table_info = &table_info_value.table_info;
// The region storage path is immutable after the region is created.
// Therefore, it's safe to store it in `VolatileContext` for future use.
let region_storage_path =
region_storage_path(&table_info.catalog_name, &table_info.schema_name);
- let engine = table_info.meta.engine;
+ let engine = table_info.meta.engine.clone();
let region_options: HashMap<String, String> = (&table_info.meta.options).into();
let open_instruction = Instruction::OpenRegion(OpenRegion::new(
RegionIdent {
cluster_id,
- datanode_id: candidate.id,
+ datanode_id: candidate_id,
table_id,
region_number,
engine,
@@ -198,17 +188,12 @@ mod tests {
use crate::error::Error;
use crate::procedure::region_migration::downgrade_leader_region::DowngradeLeaderRegion;
use crate::procedure::region_migration::test_util::{
- new_close_region_reply, send_mock_reply, TestingEnv,
+ self, new_close_region_reply, send_mock_reply, TestingEnv,
};
use crate::procedure::region_migration::{ContextFactory, PersistentContext};
fn new_persistent_context() -> PersistentContext {
- PersistentContext {
- from_peer: Peer::empty(1),
- to_peer: Peer::empty(2),
- region_id: RegionId::new(1024, 1),
- cluster_id: 0,
- }
+ test_util::new_persistent_context(1, 2, RegionId::new(1024, 1))
}
fn new_mock_open_instruction(datanode_id: DatanodeId, region_id: RegionId) -> Instruction {
@@ -244,9 +229,12 @@ mod tests {
let state = OpenCandidateRegion;
let persistent_context = new_persistent_context();
let env = TestingEnv::new();
- let ctx = env.context_factory().new_context(persistent_context);
+ let mut ctx = env.context_factory().new_context(persistent_context);
- let err = state.build_open_region_instruction(&ctx).await.unwrap_err();
+ let err = state
+ .build_open_region_instruction(&mut ctx)
+ .await
+ .unwrap_err();
assert_matches!(err, Error::TableInfoNotFound { .. });
assert!(!err.is_retryable());
diff --git a/src/meta-srv/src/procedure/region_migration/test_util.rs b/src/meta-srv/src/procedure/region_migration/test_util.rs
index 277c9f8d90fa..cc3779b8f54d 100644
--- a/src/meta-srv/src/procedure/region_migration/test_util.rs
+++ b/src/meta-srv/src/procedure/region_migration/test_util.rs
@@ -19,16 +19,19 @@ use api::v1::meta::{HeartbeatResponse, MailboxMessage, RequestHeader};
use common_meta::instruction::{InstructionReply, SimpleReply};
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::memory::MemoryKvBackend;
+use common_meta::peer::Peer;
use common_meta::sequence::Sequence;
use common_meta::DatanodeId;
use common_procedure::{Context as ProcedureContext, ProcedureId};
use common_procedure_test::MockContextProvider;
use common_time::util::current_time_millis;
+use store_api::storage::RegionId;
use tokio::sync::mpsc::{Receiver, Sender};
use super::ContextFactoryImpl;
use crate::error::Result;
use crate::handler::{HeartbeatMailbox, Pusher, Pushers};
+use crate::procedure::region_migration::PersistentContext;
use crate::region::lease_keeper::{OpeningRegionKeeper, OpeningRegionKeeperRef};
use crate::service::mailbox::{Channel, MailboxRef};
@@ -127,6 +130,7 @@ impl TestingEnv {
}
}
+/// Generates a [InstructionReply::CloseRegion] reply.
pub fn new_close_region_reply(id: u64) -> MailboxMessage {
MailboxMessage {
id,
@@ -144,6 +148,7 @@ pub fn new_close_region_reply(id: u64) -> MailboxMessage {
}
}
+/// Sends a mock reply.
pub fn send_mock_reply(
mailbox: MailboxRef,
mut rx: MockHeartbeatReceiver,
@@ -155,3 +160,13 @@ pub fn send_mock_reply(
mailbox.on_recv(reply_id, msg(reply_id)).await.unwrap();
});
}
+
+/// Generates a [PersistentContext].
+pub fn new_persistent_context(from: u64, to: u64, region_id: RegionId) -> PersistentContext {
+ PersistentContext {
+ from_peer: Peer::empty(from),
+ to_peer: Peer::empty(to),
+ region_id,
+ cluster_id: 0,
+ }
+}
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata.rs b/src/meta-srv/src/procedure/region_migration/update_metadata.rs
index 9c8d4b85b7e5..ba3092548efb 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata.rs
@@ -12,22 +12,25 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+pub(crate) mod downgrade_leader_region;
+pub(crate) mod upgrade_candidate_region;
+
use std::any::Any;
-use std::time::Duration;
-use common_meta::distributed_time_constants::REGION_LEASE_SECS;
-use common_meta::rpc::router::RegionStatus;
use serde::{Deserialize, Serialize};
-use snafu::ResultExt;
-use crate::error::{self, Result};
+use super::migration_end::RegionMigrationEnd;
+use crate::error::Result;
use crate::procedure::region_migration::downgrade_leader_region::DowngradeLeaderRegion;
use crate::procedure::region_migration::{Context, State};
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "UpdateMetadata")]
pub enum UpdateMetadata {
+ /// Downgrades the leader region.
Downgrade,
+ /// Upgrade the candidate region.
+ Upgrade,
}
#[async_trait::async_trait]
@@ -40,6 +43,12 @@ impl State for UpdateMetadata {
Ok(Box::<DowngradeLeaderRegion>::default())
}
+ UpdateMetadata::Upgrade => {
+ self.upgrade_candidate_region(ctx).await?;
+
+ // TODO(weny): invalidate fe cache.
+ Ok(Box::new(RegionMigrationEnd))
+ }
}
}
@@ -47,198 +56,3 @@ impl State for UpdateMetadata {
self
}
}
-
-impl UpdateMetadata {
- /// Downgrades the leader region.
- ///
- /// Abort(non-retry):
- /// - TableRoute is not found.
- ///
- /// Retry:
- /// - Failed to update [TableRouteValue](common_meta::key::table_region::TableRegionValue).
- /// - Failed to retrieve the metadata of table.
- ///
- /// About the failure of updating the [TableRouteValue](common_meta::key::table_region::TableRegionValue):
- ///
- /// - There may be another [RegionMigrationProcedure](crate::procedure::region_migration::RegionMigrationProcedure)
- /// that is executed concurrently for **other region**.
- /// It will only update **other region** info. Therefore, It's safe to retry after failure.
- ///
- /// - There is no other DDL procedure executed concurrently for the current table.
- async fn downgrade_leader_region(&self, ctx: &mut Context) -> Result<()> {
- let table_metadata_manager = ctx.table_metadata_manager.clone();
- let region_id = ctx.region_id();
- let table_id = region_id.table_id();
- let current_table_route_value = ctx.get_table_route_value().await?;
-
- if let Err(err) = table_metadata_manager
- .update_leader_region_status(table_id, current_table_route_value, |route| {
- if route.region.id == region_id {
- Some(Some(RegionStatus::Downgraded))
- } else {
- None
- }
- })
- .await
- .context(error::TableMetadataManagerSnafu)
- {
- debug_assert!(ctx.remove_table_route_value());
- return error::RetryLaterSnafu {
- reason: format!("Failed to update the table route during the downgrading leader region, error: {err}")
- }.fail();
- }
-
- debug_assert!(ctx.remove_table_route_value());
-
- ctx.volatile_ctx
- .set_leader_region_lease_deadline(Duration::from_secs(REGION_LEASE_SECS));
-
- Ok(())
- }
-}
-
-#[cfg(test)]
-mod tests {
- use std::assert_matches::assert_matches;
-
- use common_meta::key::test_utils::new_test_table_info;
- use common_meta::peer::Peer;
- use common_meta::rpc::router::{Region, RegionRoute};
- use store_api::storage::RegionId;
-
- use super::*;
- use crate::error::Error;
- use crate::procedure::region_migration::test_util::TestingEnv;
- use crate::procedure::region_migration::{ContextFactory, PersistentContext};
-
- fn new_persistent_context() -> PersistentContext {
- PersistentContext {
- from_peer: Peer::empty(1),
- to_peer: Peer::empty(2),
- region_id: RegionId::new(1024, 1),
- cluster_id: 0,
- }
- }
-
- #[test]
- fn test_state_serialization() {
- let state = UpdateMetadata::Downgrade;
- let expected = r#"{"UpdateMetadata":"Downgrade"}"#;
- assert_eq!(expected, serde_json::to_string(&state).unwrap());
- }
-
- #[tokio::test]
- async fn test_table_route_is_not_found_error() {
- let state = UpdateMetadata::Downgrade;
- let env = TestingEnv::new();
- let persistent_context = new_persistent_context();
- let mut ctx = env.context_factory().new_context(persistent_context);
-
- let err = state.downgrade_leader_region(&mut ctx).await.unwrap_err();
-
- assert_matches!(err, Error::TableRouteNotFound { .. });
-
- assert!(!err.is_retryable());
- }
-
- #[tokio::test]
- async fn test_failed_to_update_table_route_error() {
- let state = UpdateMetadata::Downgrade;
- let persistent_context = new_persistent_context();
- let from_peer = persistent_context.from_peer.clone();
-
- let env = TestingEnv::new();
- let mut ctx = env.context_factory().new_context(persistent_context);
- let table_id = ctx.region_id().table_id();
-
- let table_info = new_test_table_info(1024, vec![1, 2]).into();
- let region_routes = vec![
- RegionRoute {
- region: Region::new_test(RegionId::new(1024, 1)),
- leader_peer: Some(from_peer.clone()),
- ..Default::default()
- },
- RegionRoute {
- region: Region::new_test(RegionId::new(1024, 2)),
- leader_peer: Some(Peer::empty(4)),
- ..Default::default()
- },
- ];
-
- let table_metadata_manager = env.table_metadata_manager();
- table_metadata_manager
- .create_table_metadata(table_info, region_routes)
- .await
- .unwrap();
-
- let original_table_route = table_metadata_manager
- .table_route_manager()
- .get(table_id)
- .await
- .unwrap()
- .unwrap();
-
- // modifies the table route.
- table_metadata_manager
- .update_leader_region_status(table_id, &original_table_route, |route| {
- if route.region.id == RegionId::new(1024, 2) {
- Some(Some(RegionStatus::Downgraded))
- } else {
- None
- }
- })
- .await
- .unwrap();
-
- // sets the old table route.
- ctx.volatile_ctx.table_route_info = Some(original_table_route);
-
- let err = state.downgrade_leader_region(&mut ctx).await.unwrap_err();
-
- assert_matches!(err, Error::RetryLater { .. });
-
- assert!(err.is_retryable());
- assert!(err.to_string().contains("Failed to update the table route"));
- }
-
- #[tokio::test]
- async fn test_next_downgrade_leader_region_state() {
- let mut state = Box::new(UpdateMetadata::Downgrade);
- let persistent_context = new_persistent_context();
- let from_peer = persistent_context.from_peer.clone();
-
- let env = TestingEnv::new();
- let mut ctx = env.context_factory().new_context(persistent_context);
- let table_id = ctx.region_id().table_id();
-
- let table_info = new_test_table_info(1024, vec![1, 2]).into();
- let region_routes = vec![RegionRoute {
- region: Region::new_test(RegionId::new(1024, 1)),
- leader_peer: Some(from_peer.clone()),
- ..Default::default()
- }];
-
- let table_metadata_manager = env.table_metadata_manager();
- table_metadata_manager
- .create_table_metadata(table_info, region_routes)
- .await
- .unwrap();
-
- let next = state.next(&mut ctx).await.unwrap();
-
- let _ = next
- .as_any()
- .downcast_ref::<DowngradeLeaderRegion>()
- .unwrap();
-
- let latest_table_route = table_metadata_manager
- .table_route_manager()
- .get(table_id)
- .await
- .unwrap()
- .unwrap();
-
- assert!(latest_table_route.region_routes[0].is_leader_downgraded());
- assert!(ctx.volatile_ctx.table_route_info.is_none());
- }
-}
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs
new file mode 100644
index 000000000000..7ff5e59942b7
--- /dev/null
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs
@@ -0,0 +1,210 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_meta::rpc::router::RegionStatus;
+use snafu::ResultExt;
+
+use crate::error::{self, Result};
+use crate::procedure::region_migration::update_metadata::UpdateMetadata;
+use crate::procedure::region_migration::Context;
+
+impl UpdateMetadata {
+ /// Downgrades the leader region.
+ ///
+ /// Abort(non-retry):
+ /// - TableRoute is not found.
+ ///
+ /// Retry:
+ /// - Failed to update [TableRouteValue](common_meta::key::table_region::TableRegionValue).
+ /// - Failed to retrieve the metadata of table.
+ ///
+ /// About the failure of updating the [TableRouteValue](common_meta::key::table_region::TableRegionValue):
+ ///
+ /// - There may be another [RegionMigrationProcedure](crate::procedure::region_migration::RegionMigrationProcedure)
+ /// that is executed concurrently for **other region**.
+ /// It will only update **other region** info. Therefore, It's safe to retry after failure.
+ ///
+ /// - There is no other DDL procedure executed concurrently for the current table.
+ pub async fn downgrade_leader_region(&self, ctx: &mut Context) -> Result<()> {
+ let table_metadata_manager = ctx.table_metadata_manager.clone();
+ let region_id = ctx.region_id();
+ let table_id = region_id.table_id();
+ let current_table_route_value = ctx.get_table_route_value().await?;
+
+ if let Err(err) = table_metadata_manager
+ .update_leader_region_status(table_id, current_table_route_value, |route| {
+ if route.region.id == region_id {
+ Some(Some(RegionStatus::Downgraded))
+ } else {
+ None
+ }
+ })
+ .await
+ .context(error::TableMetadataManagerSnafu)
+ {
+ debug_assert!(ctx.remove_table_route_value());
+ return error::RetryLaterSnafu {
+ reason: format!("Failed to update the table route during the downgrading leader region, error: {err}")
+ }.fail();
+ }
+
+ debug_assert!(ctx.remove_table_route_value());
+
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use common_meta::key::test_utils::new_test_table_info;
+ use common_meta::peer::Peer;
+ use common_meta::rpc::router::{Region, RegionRoute, RegionStatus};
+ use store_api::storage::RegionId;
+
+ use crate::error::Error;
+ use crate::procedure::region_migration::downgrade_leader_region::DowngradeLeaderRegion;
+ use crate::procedure::region_migration::test_util::{self, TestingEnv};
+ use crate::procedure::region_migration::update_metadata::UpdateMetadata;
+ use crate::procedure::region_migration::{ContextFactory, PersistentContext, State};
+
+ fn new_persistent_context() -> PersistentContext {
+ test_util::new_persistent_context(1, 2, RegionId::new(1024, 1))
+ }
+
+ #[test]
+ fn test_state_serialization() {
+ let state = UpdateMetadata::Downgrade;
+ let expected = r#"{"UpdateMetadata":"Downgrade"}"#;
+ assert_eq!(expected, serde_json::to_string(&state).unwrap());
+ }
+
+ #[tokio::test]
+ async fn test_table_route_is_not_found_error() {
+ let state = UpdateMetadata::Downgrade;
+ let env = TestingEnv::new();
+ let persistent_context = new_persistent_context();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+
+ let err = state.downgrade_leader_region(&mut ctx).await.unwrap_err();
+
+ assert_matches!(err, Error::TableRouteNotFound { .. });
+
+ assert!(!err.is_retryable());
+ }
+
+ #[tokio::test]
+ async fn test_failed_to_update_table_route_error() {
+ let state = UpdateMetadata::Downgrade;
+ let persistent_context = new_persistent_context();
+ let from_peer = persistent_context.from_peer.clone();
+
+ let env = TestingEnv::new();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+ let table_id = ctx.region_id().table_id();
+
+ let table_info = new_test_table_info(1024, vec![1, 2]).into();
+ let region_routes = vec![
+ RegionRoute {
+ region: Region::new_test(RegionId::new(1024, 1)),
+ leader_peer: Some(from_peer.clone()),
+ ..Default::default()
+ },
+ RegionRoute {
+ region: Region::new_test(RegionId::new(1024, 2)),
+ leader_peer: Some(Peer::empty(4)),
+ ..Default::default()
+ },
+ ];
+
+ let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes)
+ .await
+ .unwrap();
+
+ let original_table_route = table_metadata_manager
+ .table_route_manager()
+ .get(table_id)
+ .await
+ .unwrap()
+ .unwrap();
+
+ // modifies the table route.
+ table_metadata_manager
+ .update_leader_region_status(table_id, &original_table_route, |route| {
+ if route.region.id == RegionId::new(1024, 2) {
+ Some(Some(RegionStatus::Downgraded))
+ } else {
+ None
+ }
+ })
+ .await
+ .unwrap();
+
+ // sets the old table route.
+ ctx.volatile_ctx.table_route = Some(original_table_route);
+
+ let err = state.downgrade_leader_region(&mut ctx).await.unwrap_err();
+
+ assert!(ctx.volatile_ctx.table_route.is_none());
+
+ assert_matches!(err, Error::RetryLater { .. });
+
+ assert!(err.is_retryable());
+ assert!(err.to_string().contains("Failed to update the table route"));
+ }
+
+ #[tokio::test]
+ async fn test_next_downgrade_leader_region_state() {
+ let mut state = Box::new(UpdateMetadata::Downgrade);
+ let persistent_context = new_persistent_context();
+ let from_peer = persistent_context.from_peer.clone();
+
+ let env = TestingEnv::new();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+ let table_id = ctx.region_id().table_id();
+
+ let table_info = new_test_table_info(1024, vec![1, 2]).into();
+ let region_routes = vec![RegionRoute {
+ region: Region::new_test(RegionId::new(1024, 1)),
+ leader_peer: Some(from_peer.clone()),
+ ..Default::default()
+ }];
+
+ let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes)
+ .await
+ .unwrap();
+
+ let next = state.next(&mut ctx).await.unwrap();
+
+ let _ = next
+ .as_any()
+ .downcast_ref::<DowngradeLeaderRegion>()
+ .unwrap();
+
+ let latest_table_route = table_metadata_manager
+ .table_route_manager()
+ .get(table_id)
+ .await
+ .unwrap()
+ .unwrap();
+
+ assert!(latest_table_route.region_routes[0].is_leader_downgraded());
+ assert!(ctx.volatile_ctx.table_route.is_none());
+ }
+}
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
new file mode 100644
index 000000000000..22c732815ea1
--- /dev/null
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
@@ -0,0 +1,376 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashMap;
+
+use common_meta::ddl::utils::region_storage_path;
+use common_meta::key::datanode_table::RegionInfo;
+use common_meta::rpc::router::RegionRoute;
+use common_telemetry::{info, warn};
+use snafu::{ensure, OptionExt, ResultExt};
+
+use crate::error::{self, Result};
+use crate::procedure::region_migration::update_metadata::UpdateMetadata;
+use crate::procedure::region_migration::Context;
+
+impl UpdateMetadata {
+ /// Returns new [Vec<RegionRoute>].
+ async fn build_upgrade_candidate_region_metadata(
+ &self,
+ ctx: &mut Context,
+ ) -> Result<Vec<RegionRoute>> {
+ let region_id = ctx.region_id();
+ let table_route_value = ctx.get_table_route_value().await?.clone();
+
+ let mut region_routes = table_route_value.region_routes.clone();
+ let region_route = region_routes
+ .iter_mut()
+ .find(|route| route.region.id == region_id)
+ .context(error::RegionRouteNotFoundSnafu { region_id })?;
+
+ // Removes downgraded status.
+ region_route.set_leader_status(None);
+
+ let candidate = &ctx.persistent_ctx.to_peer;
+ let expected_old_leader = &ctx.persistent_ctx.from_peer;
+
+ // Upgrades candidate to leader.
+ ensure!(region_route
+ .leader_peer
+ .take_if(|old_leader| old_leader.id == expected_old_leader.id)
+ .is_some(),
+ error::UnexpectedSnafu{
+ violated: format!("Unexpected region leader: {:?} during the upgrading candidate metadata, expected: {:?}", region_route.leader_peer, expected_old_leader),
+ }
+ );
+
+ region_route.leader_peer = Some(candidate.clone());
+ info!(
+ "Upgrading candidate region to leader region: {:?} for region: {}",
+ candidate, region_id
+ );
+
+ // Removes the candidate region in followers.
+ let removed = region_route
+ .follower_peers
+ .extract_if(|peer| peer.id == candidate.id)
+ .collect::<Vec<_>>();
+
+ if removed.len() > 1 {
+ warn!(
+ "Removes duplicated regions: {removed:?} during the upgrading candidate metadata for region: {region_id}"
+ );
+ }
+
+ Ok(region_routes)
+ }
+
+ /// Upgrades the candidate region.
+ ///
+ /// Abort(non-retry):
+ /// - TableRoute or RegionRoute is not found.
+ /// Typically, it's impossible, there is no other DDL procedure executed concurrently for the current table.
+ ///
+ /// Retry:
+ /// - Failed to update [TableRouteValue](common_meta::key::table_region::TableRegionValue).
+ /// - Failed to retrieve the metadata of table.
+ pub async fn upgrade_candidate_region(&self, ctx: &mut Context) -> Result<()> {
+ let region_id = ctx.region_id();
+ let table_metadata_manager = ctx.table_metadata_manager.clone();
+
+ let region_routes = self.build_upgrade_candidate_region_metadata(ctx).await?;
+ let table_info_value = ctx.get_table_info_value().await?;
+
+ let table_info = &table_info_value.table_info;
+ let region_storage_path =
+ region_storage_path(&table_info.catalog_name, &table_info.schema_name);
+ let engine = table_info.meta.engine.clone();
+ let region_options: HashMap<String, String> = (&table_info.meta.options).into();
+
+ // No remote fetch.
+ let table_route_value = ctx.get_table_route_value().await?;
+
+ if let Err(err) = table_metadata_manager
+ .update_table_route(
+ region_id.table_id(),
+ RegionInfo {
+ engine: engine.to_string(),
+ region_storage_path: region_storage_path.to_string(),
+ region_options: region_options.clone(),
+ },
+ table_route_value,
+ region_routes,
+ ®ion_options,
+ )
+ .await
+ .context(error::TableMetadataManagerSnafu)
+ {
+ debug_assert!(ctx.remove_table_route_value());
+ return error::RetryLaterSnafu {
+ reason: format!("Failed to update the table route during the upgrading candidate region, error: {err}")
+ }.fail();
+ };
+
+ debug_assert!(ctx.remove_table_route_value());
+ // Consumes the guard.
+ ctx.volatile_ctx.opening_region_guard.take();
+
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use common_meta::key::test_utils::new_test_table_info;
+ use common_meta::peer::Peer;
+ use common_meta::rpc::router::{Region, RegionRoute, RegionStatus};
+ use store_api::storage::RegionId;
+
+ use crate::error::Error;
+ use crate::procedure::region_migration::migration_end::RegionMigrationEnd;
+ use crate::procedure::region_migration::test_util::{self, TestingEnv};
+ use crate::procedure::region_migration::update_metadata::UpdateMetadata;
+ use crate::procedure::region_migration::{ContextFactory, PersistentContext, State};
+ use crate::region::lease_keeper::OpeningRegionKeeper;
+
+ fn new_persistent_context() -> PersistentContext {
+ test_util::new_persistent_context(1, 2, RegionId::new(1024, 1))
+ }
+
+ #[tokio::test]
+ async fn test_table_route_is_not_found_error() {
+ let state = UpdateMetadata::Upgrade;
+
+ let env = TestingEnv::new();
+ let persistent_context = new_persistent_context();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+
+ let err = state
+ .build_upgrade_candidate_region_metadata(&mut ctx)
+ .await
+ .unwrap_err();
+
+ assert_matches!(err, Error::TableRouteNotFound { .. });
+ assert!(!err.is_retryable());
+ }
+
+ #[tokio::test]
+ async fn test_region_route_is_not_found() {
+ let state = UpdateMetadata::Upgrade;
+ let env = TestingEnv::new();
+ let persistent_context = new_persistent_context();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+
+ let table_info = new_test_table_info(1024, vec![2]).into();
+ let region_routes = vec![RegionRoute {
+ region: Region::new_test(RegionId::new(1024, 2)),
+ leader_peer: Some(Peer::empty(4)),
+ ..Default::default()
+ }];
+
+ let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes)
+ .await
+ .unwrap();
+
+ let err = state
+ .build_upgrade_candidate_region_metadata(&mut ctx)
+ .await
+ .unwrap_err();
+
+ assert_matches!(err, Error::RegionRouteNotFound { .. });
+ assert!(!err.is_retryable());
+ }
+
+ #[tokio::test]
+ async fn test_region_route_expected_leader() {
+ let state = UpdateMetadata::Upgrade;
+ let env = TestingEnv::new();
+ let persistent_context = new_persistent_context();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+
+ let table_info = new_test_table_info(1024, vec![1]).into();
+ let region_routes = vec![RegionRoute {
+ region: Region::new_test(RegionId::new(1024, 1)),
+ leader_peer: Some(Peer::empty(3)),
+ ..Default::default()
+ }];
+
+ let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes)
+ .await
+ .unwrap();
+
+ let err = state
+ .build_upgrade_candidate_region_metadata(&mut ctx)
+ .await
+ .unwrap_err();
+
+ assert_matches!(err, Error::Unexpected { .. });
+ assert!(!err.is_retryable());
+ assert!(err.to_string().contains("Unexpected region leader"));
+ }
+
+ #[tokio::test]
+ async fn test_build_upgrade_candidate_region_metadata() {
+ let state = UpdateMetadata::Upgrade;
+ let env = TestingEnv::new();
+ let persistent_context = new_persistent_context();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+
+ let table_info = new_test_table_info(1024, vec![1]).into();
+ let region_routes = vec![RegionRoute {
+ region: Region::new_test(RegionId::new(1024, 1)),
+ leader_peer: Some(Peer::empty(1)),
+ follower_peers: vec![Peer::empty(2), Peer::empty(3)],
+ leader_status: Some(RegionStatus::Downgraded),
+ }];
+
+ let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes)
+ .await
+ .unwrap();
+
+ let new_region_routes = state
+ .build_upgrade_candidate_region_metadata(&mut ctx)
+ .await
+ .unwrap();
+
+ assert!(!new_region_routes[0].is_leader_downgraded());
+ assert_eq!(new_region_routes[0].follower_peers, vec![Peer::empty(3)]);
+ assert_eq!(new_region_routes[0].leader_peer.as_ref().unwrap().id, 2);
+ }
+
+ #[tokio::test]
+ async fn test_failed_to_update_table_route_error() {
+ let state = UpdateMetadata::Upgrade;
+ let env = TestingEnv::new();
+ let persistent_context = new_persistent_context();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+ let opening_keeper = OpeningRegionKeeper::default();
+
+ let table_id = 1024;
+ let table_info = new_test_table_info(table_id, vec![1]).into();
+ let region_routes = vec![
+ RegionRoute {
+ region: Region::new_test(RegionId::new(table_id, 1)),
+ leader_peer: Some(Peer::empty(1)),
+ follower_peers: vec![Peer::empty(5), Peer::empty(3)],
+ leader_status: Some(RegionStatus::Downgraded),
+ },
+ RegionRoute {
+ region: Region::new_test(RegionId::new(table_id, 2)),
+ leader_peer: Some(Peer::empty(4)),
+ leader_status: Some(RegionStatus::Downgraded),
+ ..Default::default()
+ },
+ ];
+
+ let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes)
+ .await
+ .unwrap();
+
+ let original_table_route = table_metadata_manager
+ .table_route_manager()
+ .get(table_id)
+ .await
+ .unwrap()
+ .unwrap();
+
+ // modifies the table route.
+ table_metadata_manager
+ .update_leader_region_status(table_id, &original_table_route, |route| {
+ if route.region.id == RegionId::new(1024, 2) {
+ // Removes the status.
+ Some(None)
+ } else {
+ None
+ }
+ })
+ .await
+ .unwrap();
+
+ // sets the old table route.
+ ctx.volatile_ctx.table_route = Some(original_table_route);
+ let guard = opening_keeper
+ .register(2, RegionId::new(table_id, 1))
+ .unwrap();
+ ctx.volatile_ctx.opening_region_guard = Some(guard);
+
+ let err = state.upgrade_candidate_region(&mut ctx).await.unwrap_err();
+
+ assert!(ctx.volatile_ctx.table_route.is_none());
+ assert!(ctx.volatile_ctx.opening_region_guard.is_some());
+ assert_matches!(err, Error::RetryLater { .. });
+
+ assert!(err.is_retryable());
+ assert!(err.to_string().contains("Failed to update the table route"));
+ }
+
+ #[tokio::test]
+ async fn test_next_migration_end_state() {
+ let mut state = Box::new(UpdateMetadata::Upgrade);
+ let env = TestingEnv::new();
+ let persistent_context = new_persistent_context();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+ let opening_keeper = OpeningRegionKeeper::default();
+
+ let table_id = 1024;
+ let table_info = new_test_table_info(table_id, vec![1]).into();
+ let region_routes = vec![RegionRoute {
+ region: Region::new_test(RegionId::new(table_id, 1)),
+ leader_peer: Some(Peer::empty(1)),
+ leader_status: Some(RegionStatus::Downgraded),
+ ..Default::default()
+ }];
+
+ let guard = opening_keeper
+ .register(2, RegionId::new(table_id, 1))
+ .unwrap();
+ ctx.volatile_ctx.opening_region_guard = Some(guard);
+
+ let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes)
+ .await
+ .unwrap();
+
+ let next = state.next(&mut ctx).await.unwrap();
+
+ let _ = next.as_any().downcast_ref::<RegionMigrationEnd>().unwrap();
+
+ let region_routes = table_metadata_manager
+ .table_route_manager()
+ .get(table_id)
+ .await
+ .unwrap()
+ .unwrap()
+ .into_inner()
+ .region_routes;
+
+ assert!(ctx.volatile_ctx.table_route.is_none());
+ assert!(ctx.volatile_ctx.opening_region_guard.is_none());
+ assert_eq!(region_routes.len(), 1);
+ assert!(!region_routes[0].is_leader_downgraded());
+ assert!(region_routes[0].follower_peers.is_empty());
+ assert_eq!(region_routes[0].leader_peer.as_ref().unwrap().id, 2);
+ }
+}
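The core of this step is the leader swap in `build_upgrade_candidate_region_metadata`: clear the downgraded marker, confirm the current leader is still the expected `from_peer`, promote the candidate, and drop the candidate from the follower list. Below is a minimal, self-contained sketch of that invariant using simplified stand-in types (not the real `common_meta` structs), and using `retain` instead of the unstable `extract_if`:

// Minimal sketch of the leader-swap step; `Peer` and `RegionRoute` are
// simplified stand-ins for the real `common_meta` types.
#[derive(Clone, Debug, PartialEq)]
struct Peer {
    id: u64,
}

#[derive(Debug)]
struct RegionRoute {
    leader_peer: Option<Peer>,
    follower_peers: Vec<Peer>,
    // `None` means the leader is no longer marked as downgraded.
    leader_status: Option<&'static str>,
}

fn promote_candidate(
    route: &mut RegionRoute,
    expected_old_leader: &Peer,
    candidate: &Peer,
) -> Result<(), String> {
    // Remove the downgraded marker first.
    route.leader_status = None;

    // Only swap leaders if the current leader is the one we expect; otherwise
    // another procedure has changed the route and this step must abort.
    let leader_matches = route
        .leader_peer
        .as_ref()
        .map(|leader| leader.id == expected_old_leader.id)
        .unwrap_or(false);
    if !leader_matches {
        return Err(format!(
            "unexpected region leader: {:?}, expected: {:?}",
            route.leader_peer, expected_old_leader
        ));
    }
    route.leader_peer = Some(candidate.clone());

    // The promoted candidate must not remain in the follower list.
    route.follower_peers.retain(|peer| peer.id != candidate.id);
    Ok(())
}

fn main() {
    let mut route = RegionRoute {
        leader_peer: Some(Peer { id: 1 }),
        follower_peers: vec![Peer { id: 2 }, Peer { id: 3 }],
        leader_status: Some("Downgraded"),
    };
    promote_candidate(&mut route, &Peer { id: 1 }, &Peer { id: 2 }).unwrap();
    assert_eq!(route.leader_peer, Some(Peer { id: 2 }));
    assert_eq!(route.follower_peers, vec![Peer { id: 3 }]);
    assert!(route.leader_status.is_none());
}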
type: feat
masked_commit_message: add update metadata step for upgrading candidate region (#2811)

hash: a922dcd9dffc786224861105efa94593f43ae7f6
date: 2025-03-10 13:52:35
author: Lei, HUANG
commit_message: refactor(mito): move wal sync task to background (#5677)
is_merge: false
git_diff:
diff --git a/src/log-store/src/error.rs b/src/log-store/src/error.rs
index 962606666bd7..5f9a5fc4e3f5 100644
--- a/src/log-store/src/error.rs
+++ b/src/log-store/src/error.rs
@@ -40,15 +40,17 @@ pub enum Error {
actual: String,
},
- #[snafu(display("Failed to start log store gc task"))]
- StartGcTask {
+ #[snafu(display("Failed to start log store task: {}", name))]
+ StartWalTask {
+ name: String,
#[snafu(implicit)]
location: Location,
source: RuntimeError,
},
- #[snafu(display("Failed to stop log store gc task"))]
- StopGcTask {
+ #[snafu(display("Failed to stop log store task: {}", name))]
+ StopWalTask {
+ name: String,
#[snafu(implicit)]
location: Location,
source: RuntimeError,
diff --git a/src/log-store/src/raft_engine/backend.rs b/src/log-store/src/raft_engine/backend.rs
index 456c6e2c8883..3d41e5298d3f 100644
--- a/src/log-store/src/raft_engine/backend.rs
+++ b/src/log-store/src/raft_engine/backend.rs
@@ -35,7 +35,7 @@ use common_runtime::RepeatedTask;
use raft_engine::{Config, Engine, LogBatch, ReadableSize, RecoveryMode};
use snafu::{IntoError, ResultExt};
-use crate::error::{self, Error, IoSnafu, RaftEngineSnafu, StartGcTaskSnafu};
+use crate::error::{self, Error, IoSnafu, RaftEngineSnafu, StartWalTaskSnafu};
use crate::raft_engine::log_store::PurgeExpiredFilesFunction;
pub(crate) const SYSTEM_NAMESPACE: u64 = 0;
@@ -93,7 +93,8 @@ impl RaftEngineBackend {
);
gc_task
.start(common_runtime::global_runtime())
- .context(StartGcTaskSnafu)?;
+ .context(StartWalTaskSnafu { name: "gc_task" })?;
+
Ok(Self {
engine: RwLock::new(engine),
_gc_task: gc_task,
diff --git a/src/log-store/src/raft_engine/log_store.rs b/src/log-store/src/raft_engine/log_store.rs
index 3e2ff5023b24..c7df8be66c4e 100644
--- a/src/log-store/src/raft_engine/log_store.rs
+++ b/src/log-store/src/raft_engine/log_store.rs
@@ -14,7 +14,6 @@
use std::collections::{hash_map, HashMap};
use std::fmt::{Debug, Formatter};
-use std::sync::atomic::{AtomicI64, Ordering};
use std::sync::Arc;
use std::time::Duration;
@@ -32,7 +31,7 @@ use store_api::storage::RegionId;
use crate::error::{
AddEntryLogBatchSnafu, DiscontinuousLogIndexSnafu, Error, FetchEntrySnafu,
IllegalNamespaceSnafu, IllegalStateSnafu, InvalidProviderSnafu, OverrideCompactedEntrySnafu,
- RaftEngineSnafu, Result, StartGcTaskSnafu, StopGcTaskSnafu,
+ RaftEngineSnafu, Result, StartWalTaskSnafu, StopWalTaskSnafu,
};
use crate::metrics;
use crate::raft_engine::backend::SYSTEM_NAMESPACE;
@@ -46,7 +45,7 @@ pub struct RaftEngineLogStore {
read_batch_size: usize,
engine: Arc<Engine>,
gc_task: RepeatedTask<Error>,
- last_sync_time: AtomicI64,
+ sync_task: RepeatedTask<Error>,
}
pub struct PurgeExpiredFilesFunction {
@@ -83,6 +82,31 @@ impl TaskFunction<Error> for PurgeExpiredFilesFunction {
}
}
+pub struct SyncWalTaskFunction {
+ engine: Arc<Engine>,
+}
+
+#[async_trait::async_trait]
+impl TaskFunction<Error> for SyncWalTaskFunction {
+ async fn call(&mut self) -> std::result::Result<(), Error> {
+ let engine = self.engine.clone();
+ if let Err(e) = tokio::task::spawn_blocking(move || engine.sync()).await {
+ error!(e; "Failed to sync raft engine log files");
+ };
+ Ok(())
+ }
+
+ fn name(&self) -> &str {
+ "SyncWalTaskFunction"
+ }
+}
+
+impl SyncWalTaskFunction {
+ pub fn new(engine: Arc<Engine>) -> Self {
+ Self { engine }
+ }
+}
+
impl RaftEngineLogStore {
pub async fn try_new(dir: String, config: &RaftEngineConfig) -> Result<Self> {
let raft_engine_config = Config {
@@ -104,13 +128,18 @@ impl RaftEngineLogStore {
}),
);
+ let sync_task = RepeatedTask::new(
+ config.sync_period.unwrap_or(Duration::from_secs(5)),
+ Box::new(SyncWalTaskFunction::new(engine.clone())),
+ );
+
let log_store = Self {
sync_write: config.sync_write,
sync_period: config.sync_period,
read_batch_size: config.read_batch_size,
engine,
gc_task,
- last_sync_time: AtomicI64::new(0),
+ sync_task,
};
log_store.start()?;
Ok(log_store)
@@ -123,7 +152,10 @@ impl RaftEngineLogStore {
fn start(&self) -> Result<()> {
self.gc_task
.start(common_runtime::global_runtime())
- .context(StartGcTaskSnafu)
+ .context(StartWalTaskSnafu { name: "gc_task" })?;
+ self.sync_task
+ .start(common_runtime::global_runtime())
+ .context(StartWalTaskSnafu { name: "sync_task" })
}
fn span(&self, provider: &RaftEngineProvider) -> (Option<u64>, Option<u64>) {
@@ -220,7 +252,14 @@ impl LogStore for RaftEngineLogStore {
type Error = Error;
async fn stop(&self) -> Result<()> {
- self.gc_task.stop().await.context(StopGcTaskSnafu)
+ self.gc_task
+ .stop()
+ .await
+ .context(StopWalTaskSnafu { name: "gc_task" })?;
+ self.sync_task
+ .stop()
+ .await
+ .context(StopWalTaskSnafu { name: "sync_task" })
}
/// Appends a batch of entries to logstore. `RaftEngineLogStore` assures the atomicity of
@@ -240,20 +279,9 @@ impl LogStore for RaftEngineLogStore {
}
let (mut batch, last_entry_ids) = self.entries_to_batch(entries)?;
-
- let mut sync = self.sync_write;
-
- if let Some(sync_period) = &self.sync_period {
- let now = common_time::util::current_time_millis();
- if now - self.last_sync_time.load(Ordering::Relaxed) >= sync_period.as_millis() as i64 {
- self.last_sync_time.store(now, Ordering::Relaxed);
- sync = true;
- }
- }
-
let _ = self
.engine
- .write(&mut batch, sync)
+ .write(&mut batch, self.sync_write)
.context(RaftEngineSnafu)?;
Ok(AppendBatchResponse { last_entry_ids })
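The key behavioral change in this commit is that WAL fsync moves off the write path: instead of tracking `last_sync_time` and deciding per append whether to sync, a repeated background task calls `engine.sync()` on a fixed interval via `spawn_blocking`. A rough tokio-only sketch of that pattern follows; `FakeEngine` is a hypothetical stand-in for `raft_engine::Engine`, and a plain loop replaces the `RepeatedTask` abstraction used in the real code:

// Assumes the `tokio` crate with the "full" feature set.
use std::sync::Arc;
use std::time::Duration;

struct FakeEngine;

impl FakeEngine {
    fn sync(&self) {
        // The real task would fsync WAL files here via the engine's sync call.
        println!("wal synced");
    }
}

fn spawn_sync_task(engine: Arc<FakeEngine>, period: Duration) -> tokio::task::JoinHandle<()> {
    tokio::spawn(async move {
        let mut ticker = tokio::time::interval(period);
        loop {
            ticker.tick().await;
            let engine = engine.clone();
            // Syncing may block, so keep it off the async worker threads.
            if let Err(e) = tokio::task::spawn_blocking(move || engine.sync()).await {
                eprintln!("failed to sync wal: {e}");
            }
        }
    })
}

#[tokio::main]
async fn main() {
    let engine = Arc::new(FakeEngine);
    let handle = spawn_sync_task(engine, Duration::from_millis(100));
    tokio::time::sleep(Duration::from_millis(350)).await;
    handle.abort();
}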
type: refactor
masked_commit_message: move wal sync task to background (#5677)

hash: 88c3d331a1039832e8cbe4e1ac71147a687e5c33
date: 2023-01-03 16:45:47
author: Ning Sun
commit_message: refactor: do not call use upon mysql connection (#818)
is_merge: false
git_diff:
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index fa790739a0ea..8e110e6b0ea0 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -249,6 +249,9 @@ pub enum Error {
#[snafu(backtrace)]
source: common_grpc::error::Error,
},
+
+ #[snafu(display("Cannot find requested database: {}-{}", catalog, schema))]
+ DatabaseNotFound { catalog: String, schema: String },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -306,6 +309,8 @@ impl ErrorExt for Error {
| InvalidAuthorizationHeader { .. }
| InvalidBase64Value { .. }
| InvalidUtf8Value { .. } => StatusCode::InvalidAuthHeader,
+
+ DatabaseNotFound { .. } => StatusCode::DatabaseNotFound,
}
}
diff --git a/src/servers/src/mysql/handler.rs b/src/servers/src/mysql/handler.rs
index c237ddc5d687..0e9b3a1b2b2a 100644
--- a/src/servers/src/mysql/handler.rs
+++ b/src/servers/src/mysql/handler.rs
@@ -17,6 +17,7 @@ use std::sync::Arc;
use std::time::Instant;
use async_trait::async_trait;
+use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_query::Output;
use common_telemetry::{error, trace};
use opensrv_mysql::{
@@ -183,14 +184,21 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
}
async fn on_init<'a>(&'a mut self, database: &'a str, w: InitWriter<'a, W>) -> Result<()> {
- let query = format!("USE {}", database.trim());
- let output = self.do_query(&query).await.remove(0);
- if let Err(e) = output {
- w.error(ErrorKind::ER_UNKNOWN_ERROR, e.to_string().as_bytes())
- .await
+ // TODO(sunng87): set catalog
+ if self
+ .query_handler
+ .is_valid_schema(DEFAULT_CATALOG_NAME, database)?
+ {
+ let context = self.session.context();
+ // TODO(sunng87): set catalog
+ context.set_current_schema(database);
+ w.ok().await.map_err(|e| e.into())
} else {
- w.ok().await
+ error::DatabaseNotFoundSnafu {
+ catalog: DEFAULT_CATALOG_NAME,
+ schema: database,
+ }
+ .fail()
}
- .map_err(|e| e.into())
}
}
diff --git a/src/servers/tests/mysql/mysql_server_test.rs b/src/servers/tests/mysql/mysql_server_test.rs
index 27dfa27f2533..280d0a1dc2cd 100644
--- a/src/servers/tests/mysql/mysql_server_test.rs
+++ b/src/servers/tests/mysql/mysql_server_test.rs
@@ -16,6 +16,7 @@ use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
+use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_recordbatch::RecordBatch;
use common_runtime::Builder as RuntimeBuilder;
use datatypes::schema::Schema;
@@ -91,7 +92,7 @@ async fn test_shutdown_mysql_server() -> Result<()> {
for _ in 0..2 {
join_handles.push(tokio::spawn(async move {
for _ in 0..1000 {
- match create_connection(server_port, false).await {
+ match create_connection(server_port, None, false).await {
Ok(mut connection) => {
let result: u32 = connection
.query_first("SELECT uint32s FROM numbers LIMIT 1")
@@ -197,7 +198,39 @@ async fn test_server_required_secure_client_plain() -> Result<()> {
let listening = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
let server_addr = mysql_server.start(listening).await.unwrap();
- let r = create_connection(server_addr.port(), client_tls).await;
+ let r = create_connection(server_addr.port(), None, client_tls).await;
+ assert!(r.is_err());
+ Ok(())
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn test_db_name() -> Result<()> {
+ let server_tls = TlsOption::default();
+ let client_tls = false;
+
+ #[allow(unused)]
+ let TestingData {
+ column_schemas,
+ mysql_columns_def,
+ columns,
+ mysql_text_output_rows,
+ } = all_datatype_testing_data();
+ let schema = Arc::new(Schema::new(column_schemas.clone()));
+ let recordbatch = RecordBatch::new(schema, columns).unwrap();
+ let table = MemTable::new("all_datatypes", recordbatch);
+
+ let mysql_server = create_mysql_server(table, server_tls)?;
+
+ let listening = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
+ let server_addr = mysql_server.start(listening).await.unwrap();
+
+ let r = create_connection(server_addr.port(), None, client_tls).await;
+ assert!(r.is_ok());
+
+ let r = create_connection(server_addr.port(), Some(DEFAULT_SCHEMA_NAME), client_tls).await;
+ assert!(r.is_ok());
+
+ let r = create_connection(server_addr.port(), Some("tomcat"), client_tls).await;
assert!(r.is_err());
Ok(())
}
@@ -219,7 +252,7 @@ async fn do_test_query_all_datatypes(server_tls: TlsOption, client_tls: bool) ->
let listening = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
let server_addr = mysql_server.start(listening).await.unwrap();
- let mut connection = create_connection(server_addr.port(), client_tls)
+ let mut connection = create_connection(server_addr.port(), None, client_tls)
.await
.unwrap();
@@ -261,7 +294,7 @@ async fn test_query_concurrently() -> Result<()> {
join_handles.push(tokio::spawn(async move {
let mut rand: StdRng = rand::SeedableRng::from_entropy();
- let mut connection = create_connection(server_port, false).await.unwrap();
+ let mut connection = create_connection(server_port, None, false).await.unwrap();
for _ in 0..expect_executed_queries_per_worker {
let expected: u32 = rand.gen_range(0..100);
let result: u32 = connection
@@ -275,7 +308,7 @@ async fn test_query_concurrently() -> Result<()> {
let should_recreate_conn = expected == 1;
if should_recreate_conn {
- connection = create_connection(server_port, false).await.unwrap();
+ connection = create_connection(server_port, None, false).await.unwrap();
}
}
expect_executed_queries_per_worker
@@ -289,12 +322,17 @@ async fn test_query_concurrently() -> Result<()> {
Ok(())
}
-async fn create_connection(port: u16, ssl: bool) -> mysql_async::Result<mysql_async::Conn> {
+async fn create_connection(
+ port: u16,
+ db_name: Option<&str>,
+ ssl: bool,
+) -> mysql_async::Result<mysql_async::Conn> {
let mut opts = mysql_async::OptsBuilder::default()
.ip_or_hostname("127.0.0.1")
.tcp_port(port)
.prefer_socket(false)
.wait_timeout(Some(1000))
+ .db_name(db_name)
.user(Some("greptime".to_string()))
.pass(Some("greptime".to_string()));
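The handler change amounts to: validate the requested schema against the catalog up front and update the session context directly, rather than round-tripping a `USE <db>` statement through the query engine. A simplified sketch of that flow, with hypothetical `Catalog` and `Session` types standing in for the servers-crate API, might look like:

use std::collections::HashSet;

const DEFAULT_CATALOG_NAME: &str = "greptime";

struct Session {
    current_schema: Option<String>,
}

struct Catalog {
    schemas: HashSet<(String, String)>,
}

impl Catalog {
    fn is_valid_schema(&self, catalog: &str, schema: &str) -> bool {
        self.schemas
            .contains(&(catalog.to_string(), schema.to_string()))
    }
}

fn on_init(catalog: &Catalog, session: &mut Session, database: &str) -> Result<(), String> {
    if catalog.is_valid_schema(DEFAULT_CATALOG_NAME, database) {
        // Set the session's current schema directly; no query is issued.
        session.current_schema = Some(database.to_string());
        Ok(())
    } else {
        Err(format!(
            "Cannot find requested database: {DEFAULT_CATALOG_NAME}-{database}"
        ))
    }
}

fn main() {
    let catalog = Catalog {
        schemas: [(DEFAULT_CATALOG_NAME.to_string(), "public".to_string())]
            .into_iter()
            .collect(),
    };
    let mut session = Session { current_schema: None };

    assert!(on_init(&catalog, &mut session, "public").is_ok());
    assert_eq!(session.current_schema.as_deref(), Some("public"));
    assert!(on_init(&catalog, &mut session, "tomcat").is_err());
}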
type: refactor
masked_commit_message: do not call use upon mysql connection (#818)

hash: 88c3d331a1039832e8cbe4e1ac71147a687e5c33
date: 2025-02-07 12:51:20
author: shuiyisong
commit_message: refactor: otlp logs insertion (#5479)
is_merge: false
git_diff:
diff --git a/Cargo.lock b/Cargo.lock
index aa00fcf08154..6e374f8b6e8b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6610,17 +6610,18 @@ dependencies = [
[[package]]
name = "meter-core"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=a10facb353b41460eeb98578868ebf19c2084fac#a10facb353b41460eeb98578868ebf19c2084fac"
+source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=5618e779cf2bb4755b499c630fba4c35e91898cb#5618e779cf2bb4755b499c630fba4c35e91898cb"
dependencies = [
"anymap2",
"once_cell",
"parking_lot 0.12.3",
+ "tracing",
]
[[package]]
name = "meter-macros"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=a10facb353b41460eeb98578868ebf19c2084fac#a10facb353b41460eeb98578868ebf19c2084fac"
+source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=5618e779cf2bb4755b499c630fba4c35e91898cb#5618e779cf2bb4755b499c630fba4c35e91898cb"
dependencies = [
"meter-core",
]
diff --git a/Cargo.toml b/Cargo.toml
index c094a2d651a3..de50b357a372 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -139,7 +139,7 @@ jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a3
lazy_static = "1.4"
local-ip-address = "0.6"
loki-api = { git = "https://github.com/shuiyisong/tracing-loki", branch = "chore/prost_version" }
-meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
+meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" }
mockall = "0.11.4"
moka = "0.12"
nalgebra = "0.33"
@@ -283,7 +283,7 @@ pprof = { git = "https://github.com/GreptimeTeam/pprof-rs", rev = "1bd1e21" }
[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
-rev = "a10facb353b41460eeb98578868ebf19c2084fac"
+rev = "5618e779cf2bb4755b499c630fba4c35e91898cb"
[profile.release]
debug = 1
diff --git a/src/pipeline/src/etl.rs b/src/pipeline/src/etl.rs
index d55cf25d543d..08ce929fd61d 100644
--- a/src/pipeline/src/etl.rs
+++ b/src/pipeline/src/etl.rs
@@ -312,6 +312,7 @@ pub(crate) fn find_key_index(intermediate_keys: &[String], key: &str, kind: &str
}
/// SelectInfo is used to store the selected keys from OpenTelemetry record attrs
+/// The key is used to uplift value from the attributes and serve as column name in the table
#[derive(Default)]
pub struct SelectInfo {
pub keys: Vec<String>,
diff --git a/src/pipeline/src/etl/transform/transformer/greptime.rs b/src/pipeline/src/etl/transform/transformer/greptime.rs
index dedb07e842d6..7bbca8ad771e 100644
--- a/src/pipeline/src/etl/transform/transformer/greptime.rs
+++ b/src/pipeline/src/etl/transform/transformer/greptime.rs
@@ -17,7 +17,7 @@ pub mod coerce;
use std::collections::HashSet;
use std::sync::Arc;
-use ahash::HashMap;
+use ahash::{HashMap, HashMapExt};
use api::helper::proto_value_type;
use api::v1::column_data_type_extension::TypeExt;
use api::v1::value::ValueData;
@@ -245,6 +245,15 @@ pub struct SchemaInfo {
pub index: HashMap<String, usize>,
}
+impl SchemaInfo {
+ pub fn with_capacity(capacity: usize) -> Self {
+ Self {
+ schema: Vec::with_capacity(capacity),
+ index: HashMap::with_capacity(capacity),
+ }
+ }
+}
+
fn resolve_schema(
index: Option<usize>,
value_data: ValueData,
diff --git a/src/servers/src/http/loki.rs b/src/servers/src/http/loki.rs
index 05c2366a0197..219db1698694 100644
--- a/src/servers/src/http/loki.rs
+++ b/src/servers/src/http/loki.rs
@@ -103,11 +103,7 @@ pub async fn loki_ingest(
// fill Null for missing values
for row in rows.iter_mut() {
- if row.len() < schemas.len() {
- for _ in row.len()..schemas.len() {
- row.push(GreptimeValue { value_data: None });
- }
- }
+ row.resize(schemas.len(), GreptimeValue::default());
}
let rows = Rows {
diff --git a/src/servers/src/http/otlp.rs b/src/servers/src/http/otlp.rs
index b5c4607c29e3..5cc8f777c44a 100644
--- a/src/servers/src/http/otlp.rs
+++ b/src/servers/src/http/otlp.rs
@@ -38,6 +38,7 @@ use snafu::prelude::*;
use super::header::{write_cost_header_map, CONTENT_TYPE_PROTOBUF};
use crate::error::{self, PipelineSnafu, Result};
use crate::http::extractor::{LogTableName, PipelineInfo, SelectInfoWrapper, TraceTableName};
+use crate::metrics::METRIC_HTTP_OPENTELEMETRY_LOGS_ELAPSED;
use crate::otlp::trace::TRACE_TABLE_NAME;
use crate::query_handler::OpenTelemetryProtocolHandlerRef;
@@ -112,7 +113,7 @@ pub async fn logs(
let db = query_ctx.get_db_string();
query_ctx.set_channel(Channel::Otlp);
let query_ctx = Arc::new(query_ctx);
- let _timer = crate::metrics::METRIC_HTTP_OPENTELEMETRY_LOGS_ELAPSED
+ let _timer = METRIC_HTTP_OPENTELEMETRY_LOGS_ELAPSED
.with_label_values(&[db.as_str()])
.start_timer();
let request = ExportLogsServiceRequest::decode(bytes).context(error::DecodeOtlpRequestSnafu)?;
diff --git a/src/servers/src/otlp/logs.rs b/src/servers/src/otlp/logs.rs
index f11cd4ff3c68..71c104666b5f 100644
--- a/src/servers/src/otlp/logs.rs
+++ b/src/servers/src/otlp/logs.rs
@@ -13,7 +13,6 @@
// limitations under the License.
use std::collections::{BTreeMap, HashMap as StdHashMap};
-use std::mem;
use api::v1::column_data_type_extension::TypeExt;
use api::v1::value::ValueData;
@@ -310,7 +309,10 @@ fn build_otlp_logs_identity_schema() -> Vec<ColumnSchema> {
.collect::<Vec<ColumnSchema>>()
}
-fn build_otlp_build_in_row(log: LogRecord, parse_ctx: &mut ParseContext) -> Row {
+fn build_otlp_build_in_row(
+ log: LogRecord,
+ parse_ctx: &mut ParseContext,
+) -> (Row, JsonbValue<'static>) {
let log_attr = key_value_to_jsonb(log.attributes);
let ts = if log.time_unix_nano != 0 {
log.time_unix_nano
@@ -365,50 +367,52 @@ fn build_otlp_build_in_row(log: LogRecord, parse_ctx: &mut ParseContext) -> Row
value_data: Some(ValueData::StringValue(parse_ctx.resource_url.clone())),
},
];
- Row { values: row }
+ (Row { values: row }, log_attr)
}
fn extract_field_from_attr_and_combine_schema(
- schema_info: &mut SchemaInfo,
- log_select: &SelectInfo,
- jsonb: &jsonb::Value,
+ select_info: &SelectInfo,
+ select_schema: &mut SchemaInfo,
+ attrs: &jsonb::Value,
) -> Result<Vec<GreptimeValue>> {
- if log_select.keys.is_empty() {
- return Ok(Vec::new());
- }
- let mut append_value = Vec::with_capacity(schema_info.schema.len());
- for _ in schema_info.schema.iter() {
- append_value.push(GreptimeValue { value_data: None });
- }
- for k in &log_select.keys {
- let index = schema_info.index.get(k).copied();
- if let Some(value) = jsonb.get_by_name_ignore_case(k).cloned() {
- if let Some((schema, value)) = decide_column_schema(k, value)? {
- if let Some(index) = index {
- let column_schema = &schema_info.schema[index];
- ensure!(
- column_schema.datatype == schema.datatype,
- IncompatibleSchemaSnafu {
- column_name: k.clone(),
- datatype: column_schema.datatype().as_str_name(),
- expected: column_schema.datatype,
- actual: schema.datatype,
- }
- );
- append_value[index] = value;
- } else {
- let key = k.clone();
- schema_info.schema.push(schema);
- schema_info.index.insert(key, schema_info.schema.len() - 1);
- append_value.push(value);
+ // note we use schema.len instead of select_keys.len
+ // because the len of the row value should always matches the len of the schema
+ let mut extracted_values = vec![GreptimeValue::default(); select_schema.schema.len()];
+
+ for key in select_info.keys.iter() {
+ let Some(value) = attrs.get_by_name_ignore_case(key).cloned() else {
+ continue;
+ };
+ let Some((schema, value)) = decide_column_schema_and_convert_value(key, value)? else {
+ continue;
+ };
+
+ if let Some(index) = select_schema.index.get(key) {
+ let column_schema = &select_schema.schema[*index];
+ // datatype of the same column name should be the same
+ ensure!(
+ column_schema.datatype == schema.datatype,
+ IncompatibleSchemaSnafu {
+ column_name: key,
+ datatype: column_schema.datatype().as_str_name(),
+ expected: column_schema.datatype,
+ actual: schema.datatype,
}
- }
+ );
+ extracted_values[*index] = value;
+ } else {
+ select_schema.schema.push(schema);
+ select_schema
+ .index
+ .insert(key.clone(), select_schema.schema.len() - 1);
+ extracted_values.push(value);
}
}
- Ok(append_value)
+
+ Ok(extracted_values)
}
-fn decide_column_schema(
+fn decide_column_schema_and_convert_value(
column_name: &str,
value: JsonbValue,
) -> Result<Option<(ColumnSchema, GreptimeValue)>> {
@@ -475,128 +479,69 @@ fn decide_column_schema(
})
}
-#[derive(Debug, Clone, Copy)]
-enum OpenTelemetryLogRecordAttrType {
- Resource,
- Scope,
- Log,
-}
-
-fn merge_schema(
- input_schemas: Vec<(&SchemaInfo, OpenTelemetryLogRecordAttrType)>,
-) -> BTreeMap<&String, (OpenTelemetryLogRecordAttrType, usize, &ColumnSchema)> {
- let mut schemas = BTreeMap::new();
- input_schemas
- .into_iter()
- .for_each(|(schema_info, attr_type)| {
- for (key, index) in schema_info.index.iter() {
- if let Some(col_schema) = schema_info.schema.get(*index) {
- schemas.insert(key, (attr_type, *index, col_schema));
- }
- }
- });
- schemas
-}
-
fn parse_export_logs_service_request_to_rows(
request: ExportLogsServiceRequest,
select_info: Box<SelectInfo>,
) -> Result<Rows> {
let mut schemas = build_otlp_logs_identity_schema();
- let mut extra_resource_schema = SchemaInfo::default();
- let mut extra_scope_schema = SchemaInfo::default();
- let mut extra_log_schema = SchemaInfo::default();
- let mut parse_ctx = ParseContext::new(
- &mut extra_resource_schema,
- &mut extra_scope_schema,
- &mut extra_log_schema,
- );
- let parse_infos = parse_resource(&select_info, &mut parse_ctx, request.resource_logs)?;
- // order of schema is important
- // resource < scope < log
- // do not change the order
- let final_extra_schema_info = merge_schema(vec![
- (
- &extra_resource_schema,
- OpenTelemetryLogRecordAttrType::Resource,
- ),
- (&extra_scope_schema, OpenTelemetryLogRecordAttrType::Scope),
- (&extra_log_schema, OpenTelemetryLogRecordAttrType::Log),
- ]);
+ let mut parse_ctx = ParseContext::new(select_info);
+ let mut rows = parse_resource(&mut parse_ctx, request.resource_logs)?;
- let final_extra_schema = final_extra_schema_info
- .iter()
- .map(|(_, (_, _, v))| (*v).clone())
- .collect::<Vec<_>>();
-
- let extra_schema_len = final_extra_schema.len();
- schemas.extend(final_extra_schema);
-
- let mut results = Vec::with_capacity(parse_infos.len());
- for parse_info in parse_infos.into_iter() {
- let mut row = parse_info.values;
- let mut resource_values = parse_info.resource_extracted_values;
- let mut scope_values = parse_info.scope_extracted_values;
- let mut log_values = parse_info.log_extracted_values;
-
- let mut final_extra_values = vec![GreptimeValue { value_data: None }; extra_schema_len];
- for (idx, (_, (attr_type, index, _))) in final_extra_schema_info.iter().enumerate() {
- let value = match attr_type {
- OpenTelemetryLogRecordAttrType::Resource => resource_values.get_mut(*index),
- OpenTelemetryLogRecordAttrType::Scope => scope_values.get_mut(*index),
- OpenTelemetryLogRecordAttrType::Log => log_values.get_mut(*index),
- };
- if let Some(value) = value {
- // swap value to final_extra_values
- mem::swap(&mut final_extra_values[idx], value);
- }
- }
+ schemas.extend(parse_ctx.select_schema.schema);
+
+ rows.iter_mut().for_each(|row| {
+ row.values.resize(schemas.len(), GreptimeValue::default());
+ });
- row.values.extend(final_extra_values);
- results.push(row);
- }
Ok(Rows {
schema: schemas,
- rows: results,
+ rows,
})
}
fn parse_resource(
- select_info: &SelectInfo,
parse_ctx: &mut ParseContext,
resource_logs_vec: Vec<ResourceLogs>,
-) -> Result<Vec<ParseInfo>> {
- let mut results = Vec::new();
+) -> Result<Vec<Row>> {
+ let total_len = resource_logs_vec
+ .iter()
+ .flat_map(|r| r.scope_logs.iter())
+ .map(|s| s.log_records.len())
+ .sum();
+
+ let mut results = Vec::with_capacity(total_len);
for r in resource_logs_vec {
parse_ctx.resource_attr = r
.resource
.map(|resource| key_value_to_jsonb(resource.attributes))
.unwrap_or(JsonbValue::Null);
+
parse_ctx.resource_url = r.schema_url;
- let resource_extracted_values = extract_field_from_attr_and_combine_schema(
- parse_ctx.extra_resource_schema,
- select_info,
+ parse_ctx.resource_uplift_values = extract_field_from_attr_and_combine_schema(
+ &parse_ctx.select_info,
+ &mut parse_ctx.select_schema,
&parse_ctx.resource_attr,
)?;
- let rows = parse_scope(
- select_info,
- r.scope_logs,
- parse_ctx,
- resource_extracted_values,
- )?;
+
+ let rows = parse_scope(r.scope_logs, parse_ctx)?;
results.extend(rows);
}
Ok(results)
}
struct ParseContext<'a> {
- // selector schema
- extra_resource_schema: &'a mut SchemaInfo,
- extra_scope_schema: &'a mut SchemaInfo,
- extra_log_schema: &'a mut SchemaInfo,
+ // input selected keys
+ select_info: Box<SelectInfo>,
+ // schema infos for selected keys from resource/scope/log for current request
+ // since the value override from bottom to top, the max capacity is the length of the keys
+ select_schema: SchemaInfo,
+
+ // extracted and uplifted values using select keys
+ resource_uplift_values: Vec<GreptimeValue>,
+ scope_uplift_values: Vec<GreptimeValue>,
// passdown values
resource_url: String,
@@ -608,15 +553,13 @@ struct ParseContext<'a> {
}
impl<'a> ParseContext<'a> {
- pub fn new(
- extra_resource_schema: &'a mut SchemaInfo,
- extra_scope_schema: &'a mut SchemaInfo,
- extra_log_schema: &'a mut SchemaInfo,
- ) -> ParseContext<'a> {
+ pub fn new(select_info: Box<SelectInfo>) -> ParseContext<'a> {
+ let len = select_info.keys.len();
ParseContext {
- extra_resource_schema,
- extra_scope_schema,
- extra_log_schema,
+ select_info,
+ select_schema: SchemaInfo::with_capacity(len),
+ resource_uplift_values: vec![],
+ scope_uplift_values: vec![],
resource_url: String::new(),
resource_attr: JsonbValue::Null,
scope_name: None,
@@ -627,74 +570,68 @@ impl<'a> ParseContext<'a> {
}
}
-fn parse_scope(
- select_info: &SelectInfo,
- scopes_log_vec: Vec<ScopeLogs>,
- parse_ctx: &mut ParseContext,
- resource_extracted_values: Vec<GreptimeValue>,
-) -> Result<Vec<ParseInfo>> {
- let mut results = Vec::new();
+fn parse_scope(scopes_log_vec: Vec<ScopeLogs>, parse_ctx: &mut ParseContext) -> Result<Vec<Row>> {
+ let len = scopes_log_vec.iter().map(|l| l.log_records.len()).sum();
+ let mut results = Vec::with_capacity(len);
+
for scope_logs in scopes_log_vec {
let (scope_attrs, scope_version, scope_name) = scope_to_jsonb(scope_logs.scope);
parse_ctx.scope_name = scope_name;
parse_ctx.scope_version = scope_version;
- parse_ctx.scope_attrs = scope_attrs;
parse_ctx.scope_url = scope_logs.schema_url;
+ parse_ctx.scope_attrs = scope_attrs;
- let scope_extracted_values = extract_field_from_attr_and_combine_schema(
- parse_ctx.extra_scope_schema,
- select_info,
+ parse_ctx.scope_uplift_values = extract_field_from_attr_and_combine_schema(
+ &parse_ctx.select_info,
+ &mut parse_ctx.select_schema,
&parse_ctx.scope_attrs,
)?;
- let rows = parse_log(
- select_info,
- scope_logs.log_records,
- parse_ctx,
- &resource_extracted_values,
- &scope_extracted_values,
- )?;
+ let rows = parse_log(scope_logs.log_records, parse_ctx)?;
results.extend(rows);
}
Ok(results)
}
-fn parse_log(
- select_info: &SelectInfo,
- log_records: Vec<LogRecord>,
- parse_ctx: &mut ParseContext,
- resource_extracted_values: &[GreptimeValue],
- scope_extracted_values: &[GreptimeValue],
-) -> Result<Vec<ParseInfo>> {
+fn parse_log(log_records: Vec<LogRecord>, parse_ctx: &mut ParseContext) -> Result<Vec<Row>> {
let mut result = Vec::with_capacity(log_records.len());
for log in log_records {
- let log_attr = key_value_to_jsonb(log.attributes.clone());
+ let (mut row, log_attr) = build_otlp_build_in_row(log, parse_ctx);
- let row = build_otlp_build_in_row(log, parse_ctx);
-
- let log_extracted_values = extract_field_from_attr_and_combine_schema(
- parse_ctx.extra_log_schema,
- select_info,
+ let log_values = extract_field_from_attr_and_combine_schema(
+ &parse_ctx.select_info,
+ &mut parse_ctx.select_schema,
&log_attr,
)?;
- let parse_info = ParseInfo {
- values: row,
- resource_extracted_values: resource_extracted_values.to_vec(),
- scope_extracted_values: scope_extracted_values.to_vec(),
- log_extracted_values,
- };
- result.push(parse_info);
+ let extracted_values = merge_values(
+ log_values,
+ &parse_ctx.scope_uplift_values,
+ &parse_ctx.resource_uplift_values,
+ );
+
+ row.values.extend(extracted_values);
+
+ result.push(row);
}
Ok(result)
}
-struct ParseInfo {
- values: Row,
- resource_extracted_values: Vec<GreptimeValue>,
- scope_extracted_values: Vec<GreptimeValue>,
- log_extracted_values: Vec<GreptimeValue>,
+fn merge_values(
+ log: Vec<GreptimeValue>,
+ scope: &[GreptimeValue],
+ resource: &[GreptimeValue],
+) -> Vec<GreptimeValue> {
+ log.into_iter()
+ .enumerate()
+ .map(|(i, value)| GreptimeValue {
+ value_data: value
+ .value_data
+ .or_else(|| scope.get(i).and_then(|x| x.value_data.clone()))
+ .or_else(|| resource.get(i).and_then(|x| x.value_data.clone())),
+ })
+ .collect()
}
/// transform otlp logs request to pipeline value
diff --git a/src/servers/src/otlp/utils.rs b/src/servers/src/otlp/utils.rs
index be3741666df1..1ed37de45209 100644
--- a/src/servers/src/otlp/utils.rs
+++ b/src/servers/src/otlp/utils.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::BTreeMap;
-
use api::v1::value::ValueData;
use api::v1::ColumnDataType;
use itertools::Itertools;
@@ -47,18 +45,19 @@ pub fn any_value_to_jsonb(value: any_value::Value) -> JsonbValue<'static> {
}
pub fn key_value_to_jsonb(key_values: Vec<KeyValue>) -> JsonbValue<'static> {
- let mut map = BTreeMap::new();
- for kv in key_values {
- let value = match kv.value {
- Some(value) => match value.value {
- Some(value) => any_value_to_jsonb(value),
- None => JsonbValue::Null,
- },
- None => JsonbValue::Null,
- };
- map.insert(kv.key.clone(), value);
- }
- JsonbValue::Object(map)
+ JsonbValue::Object(
+ key_values
+ .into_iter()
+ .map(|kv| {
+ (
+ kv.key,
+ kv.value
+ .and_then(|v| v.value)
+ .map_or(JsonbValue::Null, any_value_to_jsonb),
+ )
+ })
+ .collect(),
+ )
}
#[inline]
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index a74b00ea0f95..81b31fe7676c 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -1905,7 +1905,7 @@ pub async fn test_otlp_logs(store_type: StorageType) {
let client = TestClient::new(app).await;
let content = r#"
-{"resourceLogs":[{"resource":{"attributes":[],"droppedAttributesCount":0},"scopeLogs":[{"scope":{"name":"","version":"","attributes":[],"droppedAttributesCount":0},"logRecords":[{"timeUnixNano":"1736413568497632000","observedTimeUnixNano":"0","severityNumber":9,"severityText":"Info","body":{"stringValue":"the message line one"},"attributes":[{"key":"app","value":{"stringValue":"server"}}],"droppedAttributesCount":0,"flags":0,"traceId":"f665100a612542b69cc362fe2ae9d3bf","spanId":"e58f01c4c69f4488"}],"schemaUrl":""}],"schemaUrl":"https://opentelemetry.io/schemas/1.4.0"},{"resource":{"attributes":[],"droppedAttributesCount":0},"scopeLogs":[{"scope":{"name":"","version":"","attributes":[],"droppedAttributesCount":0},"logRecords":[{"timeUnixNano":"1736413568538897000","observedTimeUnixNano":"0","severityNumber":9,"severityText":"Info","body":{"stringValue":"the message line two"},"attributes":[{"key":"app","value":{"stringValue":"server"}}],"droppedAttributesCount":0,"flags":0,"traceId":"f665100a612542b69cc362fe2ae9d3bf","spanId":"e58f01c4c69f4488"}],"schemaUrl":""}],"schemaUrl":"https://opentelemetry.io/schemas/1.4.0"}]}
+{"resourceLogs":[{"resource":{"attributes":[],"droppedAttributesCount":0},"scopeLogs":[{"scope":{"name":"","version":"","attributes":[{"key":"instance_num","value":{"stringValue":"10"}}],"droppedAttributesCount":0},"logRecords":[{"timeUnixNano":"1736413568497632000","observedTimeUnixNano":"0","severityNumber":9,"severityText":"Info","body":{"stringValue":"the message line one"},"attributes":[{"key":"app","value":{"stringValue":"server1"}}],"droppedAttributesCount":0,"flags":0,"traceId":"f665100a612542b69cc362fe2ae9d3bf","spanId":"e58f01c4c69f4488"}],"schemaUrl":""}],"schemaUrl":"https://opentelemetry.io/schemas/1.4.0"},{"resource":{"attributes":[],"droppedAttributesCount":0},"scopeLogs":[{"scope":{"name":"","version":"","attributes":[],"droppedAttributesCount":0},"logRecords":[{"timeUnixNano":"1736413568538897000","observedTimeUnixNano":"0","severityNumber":9,"severityText":"Info","body":{"stringValue":"the message line two"},"attributes":[{"key":"app","value":{"stringValue":"server2"}}],"droppedAttributesCount":0,"flags":0,"traceId":"f665100a612542b69cc362fe2ae9d3bf","spanId":"e58f01c4c69f4488"}],"schemaUrl":""}],"schemaUrl":"https://opentelemetry.io/schemas/1.4.0"}]}
"#;
let req: ExportLogsServiceRequest = serde_json::from_str(content).unwrap();
@@ -1913,6 +1913,30 @@ pub async fn test_otlp_logs(store_type: StorageType) {
{
// write log data
+ let res = send_req(
+ &client,
+ vec![(
+ HeaderName::from_static("content-type"),
+ HeaderValue::from_static("application/x-protobuf"),
+ )],
+ "/v1/otlp/v1/logs?db=public",
+ body.clone(),
+ false,
+ )
+ .await;
+ assert_eq!(StatusCode::OK, res.status());
+ let expected = "[[1736413568497632000,\"f665100a612542b69cc362fe2ae9d3bf\",\"e58f01c4c69f4488\",\"Info\",9,\"the message line one\",{\"app\":\"server1\"},0,\"\",\"\",{\"instance_num\":\"10\"},\"\",{},\"https://opentelemetry.io/schemas/1.4.0\"],[1736413568538897000,\"f665100a612542b69cc362fe2ae9d3bf\",\"e58f01c4c69f4488\",\"Info\",9,\"the message line two\",{\"app\":\"server2\"},0,\"\",\"\",{},\"\",{},\"https://opentelemetry.io/schemas/1.4.0\"]]";
+ validate_data(
+ "otlp_logs",
+ &client,
+ "select * from opentelemetry_logs;",
+ expected,
+ )
+ .await;
+ }
+
+ {
+ // write log data with selector
let res = send_req(
&client,
vec![
@@ -1922,7 +1946,11 @@ pub async fn test_otlp_logs(store_type: StorageType) {
),
(
HeaderName::from_static("x-greptime-log-table-name"),
- HeaderValue::from_static("logs1"),
+ HeaderValue::from_static("cus_logs"),
+ ),
+ (
+ HeaderName::from_static("x-greptime-log-extract-keys"),
+ HeaderValue::from_static("resource-attr,instance_num,app,not-exist"),
),
],
"/v1/otlp/v1/logs?db=public",
@@ -1931,12 +1959,24 @@ pub async fn test_otlp_logs(store_type: StorageType) {
)
.await;
assert_eq!(StatusCode::OK, res.status());
- let expected = "[[1736413568497632000,\"f665100a612542b69cc362fe2ae9d3bf\",\"e58f01c4c69f4488\",\"Info\",9,\"the message line one\",{\"app\":\"server\"},0,\"\",\"\",{},\"\",{},\"https://opentelemetry.io/schemas/1.4.0\"],[1736413568538897000,\"f665100a612542b69cc362fe2ae9d3bf\",\"e58f01c4c69f4488\",\"Info\",9,\"the message line two\",{\"app\":\"server\"},0,\"\",\"\",{},\"\",{},\"https://opentelemetry.io/schemas/1.4.0\"]]";
- validate_data("otlp_logs", &client, "select * from logs1;", expected).await;
+
+ let expected = "[[1736413568538897000,\"f665100a612542b69cc362fe2ae9d3bf\",\"e58f01c4c69f4488\",\"Info\",9,\"the message line two\",{\"app\":\"server2\"},0,\"\",\"\",{},\"\",{},\"https://opentelemetry.io/schemas/1.4.0\",null,\"server2\"],[1736413568497632000,\"f665100a612542b69cc362fe2ae9d3bf\",\"e58f01c4c69f4488\",\"Info\",9,\"the message line one\",{\"app\":\"server1\"},0,\"\",\"\",{\"instance_num\":\"10\"},\"\",{},\"https://opentelemetry.io/schemas/1.4.0\",\"10\",\"server1\"]]";
+ validate_data(
+ "otlp_logs_with_selector",
+ &client,
+ "select * from cus_logs;",
+ expected,
+ )
+ .await;
}
{
- // write log data with selector
+ // test same selector with multiple value
+ let content = r#"
+ {"resourceLogs":[{"resource":{"attributes":[{"key":"fromwhere","value":{"stringValue":"resource"}}],"droppedAttributesCount":0},"scopeLogs":[{"scope":{"name":"","version":"","attributes":[{"key":"fromwhere","value":{"stringValue":"scope"}}],"droppedAttributesCount":0},"logRecords":[{"timeUnixNano":"1736413568497632000","observedTimeUnixNano":"0","severityNumber":9,"severityText":"Info","body":{"stringValue":"the message line one"},"attributes":[{"key":"app","value":{"stringValue":"server"}},{"key":"fromwhere","value":{"stringValue":"log_attr"}}],"droppedAttributesCount":0,"flags":0,"traceId":"f665100a612542b69cc362fe2ae9d3bf","spanId":"e58f01c4c69f4488"}],"schemaUrl":""}],"schemaUrl":"https://opentelemetry.io/schemas/1.4.0"}]}
+ "#;
+ let req: ExportLogsServiceRequest = serde_json::from_str(content).unwrap();
+ let body = req.encode_to_vec();
let res = send_req(
&client,
vec![
@@ -1946,11 +1986,11 @@ pub async fn test_otlp_logs(store_type: StorageType) {
),
(
HeaderName::from_static("x-greptime-log-table-name"),
- HeaderValue::from_static("logs"),
+ HeaderValue::from_static("logs2"),
),
(
HeaderName::from_static("x-greptime-log-extract-keys"),
- HeaderValue::from_static("resource-attr,instance_num,app,not-exist"),
+ HeaderValue::from_static("fromwhere"),
),
],
"/v1/otlp/v1/logs?db=public",
@@ -1960,11 +2000,11 @@ pub async fn test_otlp_logs(store_type: StorageType) {
.await;
assert_eq!(StatusCode::OK, res.status());
- let expected = "[[1736413568497632000,\"f665100a612542b69cc362fe2ae9d3bf\",\"e58f01c4c69f4488\",\"Info\",9,\"the message line one\",{\"app\":\"server\"},0,\"\",\"\",{},\"\",{},\"https://opentelemetry.io/schemas/1.4.0\",\"server\"],[1736413568538897000,\"f665100a612542b69cc362fe2ae9d3bf\",\"e58f01c4c69f4488\",\"Info\",9,\"the message line two\",{\"app\":\"server\"},0,\"\",\"\",{},\"\",{},\"https://opentelemetry.io/schemas/1.4.0\",\"server\"]]";
+ let expected = "[[1736413568497632000,\"f665100a612542b69cc362fe2ae9d3bf\",\"e58f01c4c69f4488\",\"Info\",9,\"the message line one\",{\"app\":\"server\",\"fromwhere\":\"log_attr\"},0,\"\",\"\",{\"fromwhere\":\"scope\"},\"\",{\"fromwhere\":\"resource\"},\"https://opentelemetry.io/schemas/1.4.0\",\"log_attr\"]]";
validate_data(
- "otlp_logs_with_selector",
+ "otlp_logs_with_selector_overlapping",
&client,
- "select * from logs;",
+ "select * from logs2;",
expected,
)
.await;
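The precedence rule introduced by `merge_values` is worth calling out: for each selected column, a value extracted from the log record's own attributes wins, then the scope attributes, then the resource attributes, which is what the `fromwhere` integration test above asserts. A self-contained sketch of that merge, using a simplified `Value` in place of the protobuf `GreptimeValue`, could look like:

#[derive(Clone, Debug, PartialEq, Default)]
struct Value {
    value_data: Option<String>,
}

// For each column index, prefer the log-level value, then scope, then resource.
fn merge_values(log: Vec<Value>, scope: &[Value], resource: &[Value]) -> Vec<Value> {
    log.into_iter()
        .enumerate()
        .map(|(i, value)| Value {
            value_data: value
                .value_data
                .or_else(|| scope.get(i).and_then(|v| v.value_data.clone()))
                .or_else(|| resource.get(i).and_then(|v| v.value_data.clone())),
        })
        .collect()
}

fn main() {
    let some = |s: &str| Value { value_data: Some(s.to_string()) };
    let none = Value::default();

    // Column 0 is present at every level, column 1 only on the resource.
    let log = vec![some("log_attr"), none.clone()];
    let scope = vec![some("scope"), none.clone()];
    let resource = vec![some("resource"), some("10")];

    let merged = merge_values(log, &scope, &resource);
    assert_eq!(merged[0].value_data.as_deref(), Some("log_attr"));
    assert_eq!(merged[1].value_data.as_deref(), Some("10"));
}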
type: refactor
masked_commit_message: otlp logs insertion (#5479)